// HyperPlatform Programmer's Reference
// ept.cpp -- implements EPT (Extended Page Table) functions.
1 // Copyright (c) 2015-2018, Satoshi Tanda. All rights reserved.
2 // Use of this source code is governed by a MIT-style license that can be
3 // found in the LICENSE file.
4 
7 
8 #include "ept.h"
9 #include "asm.h"
10 #include "common.h"
11 #include "log.h"
12 #include "util.h"
13 #include "performance.h"
14 
15 extern "C" {
17 //
18 // macro utilities
19 //
20 
22 //
23 // constants and macros
24 //
25 
26 // Followings are how 64bits of a physical address is used to locate EPT
27 // entries:
28 //
29 // EPT Page map level 4 selector 9 bits
30 // EPT Page directory pointer selector 9 bits
31 // EPT Page directory selector 9 bits
32 // EPT Page table selector 9 bits
33 // EPT Byte within page 12 bits
34 
35 // Get the highest 25 bits
36 static const auto kEptpPxiShift = 39ull;
37 
38 // Get the highest 34 bits
39 static const auto kEptpPpiShift = 30ull;
40 
41 // Get the highest 43 bits
42 static const auto kEptpPdiShift = 21ull;
43 
44 // Get the highest 52 bits
45 static const auto kEptpPtiShift = 12ull;
46 
47 // Use 9 bits; 0b0000_0000_0000_0000_0000_0000_0001_1111_1111
48 static const auto kEptpPtxMask = 0x1ffull;
49 
50 // How many EPT entries are preallocated. When the number exceeds it, the
51 // hypervisor issues a bugcheck.
52 static const auto kEptpNumberOfPreallocatedEntries = 50;
53 
54 // Architecture defined number of variable range MTRRs
55 static const auto kEptpNumOfMaxVariableRangeMtrrs = 255;
56 
57 // Architecture defined number of fixed range MTRRs. 1 register for 64k, 2
58 // registers for 16k, 8 registers for 4k, and each register has 8 ranges as per
59 // "Fixed Range MTRRs" states.
60 static const auto kEptpNumOfFixedRangeMtrrs =
61  (1 + 2 + 8) * RTL_NUMBER_OF_FIELD(Ia32MtrrFixedRangeMsr, fields.types);
62 
63 // A size of array to store all possible MTRRs
64 static const auto kEptpMtrrEntriesSize =
66 
68 //
69 // types
70 //
71 
72 #include <pshpack1.h>
// A single decoded MTRR: a memory type and the physical address range it
// governs. Packed to 1 byte (pshpack1.h) and size-checked below.
struct MtrrData {
  bool enabled;        //!< Whether this entry is valid
  bool fixedMtrr;      //!< Whether this entry manages a fixed range MTRR
  UCHAR type;          //!< Memory Type (such as WB, UC)
  bool reserverd1;     //!< Padding
  ULONG reserverd2;    //!< Padding
  ULONG64 range_base;  //!< A base address of a range managed by this entry
  ULONG64 range_end;   //!< An end address of a range managed by this entry
};
82 #include <poppack.h>
83 static_assert(sizeof(MtrrData) == 24, "Size check");
84 
85 // EPT related data stored in ProcessorData
86 struct EptData {
89 
90  EptCommonEntry **preallocated_entries; // An array of pre-allocated entries
91  volatile long preallocated_entries_count; // # of used pre-allocated entries
92 };
93 
95 //
96 // prototypes
97 //
98 
99 static memory_type EptpGetMemoryType(_In_ ULONG64 physical_address);
100 
101 _When_(ept_data == nullptr,
102  _IRQL_requires_max_(DISPATCH_LEVEL)) static EptCommonEntry
103  *EptpConstructTables(_In_ EptCommonEntry *table, _In_ ULONG table_level,
104  _In_ ULONG64 physical_address,
105  _In_opt_ EptData *ept_data);
106 
107 static void EptpDestructTables(_In_ EptCommonEntry *table,
108  _In_ ULONG table_level);
109 
110 _Must_inspect_result_ _When_(ept_data == nullptr,
111  __drv_allocatesMem(Mem) _IRQL_requires_max_(
112  DISPATCH_LEVEL)) static EptCommonEntry
113  *EptpAllocateEptEntry(_In_opt_ EptData *ept_data);
114 
116  _In_ EptData *ept_data);
117 
118 _Must_inspect_result_ __drv_allocatesMem(Mem) _IRQL_requires_max_(
119  DISPATCH_LEVEL) static EptCommonEntry *EptpAllocateEptEntryFromPool();
120 
121 static void EptpInitTableEntry(_In_ EptCommonEntry *Entry,
122  _In_ ULONG table_level,
123  _In_ ULONG64 physical_address);
124 
125 static ULONG64 EptpAddressToPxeIndex(_In_ ULONG64 physical_address);
126 
127 static ULONG64 EptpAddressToPpeIndex(_In_ ULONG64 physical_address);
128 
129 static ULONG64 EptpAddressToPdeIndex(_In_ ULONG64 physical_address);
130 
131 static ULONG64 EptpAddressToPteIndex(_In_ ULONG64 physical_address);
132 
133 static bool EptpIsDeviceMemory(_In_ ULONG64 physical_address);
134 
136  _In_ ULONG table_level,
137  _In_ ULONG64 physical_address);
138 
140  _Pre_notnull_ __drv_freesMem(Mem) EptCommonEntry **preallocated_entries,
141  _In_ long used_count);
142 
143 #if defined(ALLOC_PRAGMA)
144 #pragma alloc_text(PAGE, EptIsEptAvailable)
145 #pragma alloc_text(PAGE, EptInitialization)
146 #pragma alloc_text(PAGE, EptInitializeMtrrEntries)
147 #endif
148 
150 //
151 // variables
152 //
153 
156 
158 //
159 // implementations
160 //
161 
162 // Checks if the system supports EPT technology sufficient enough
163 _Use_decl_annotations_ bool EptIsEptAvailable() {
164  PAGED_CODE();
165 
166  // Check the followings:
167  // - page walk length is 4 steps
168  // - extended page tables can be laid out in write-back memory
169  // - INVEPT instruction with all possible types is supported
170  // - INVVPID instruction with all possible types is supported
172  if (!capability.fields.support_page_walk_length4 ||
174  !capability.fields.support_invept ||
176  !capability.fields.support_all_context_invept ||
177  !capability.fields.support_invvpid ||
180  !capability.fields.support_all_context_invvpid ||
182  return false;
183  }
184  return true;
185 }
186 
// Returns an EPT pointer (the raw EPTP value for the VMCS) from ept_data
_Use_decl_annotations_ ULONG64 EptGetEptPointer(EptData *ept_data) {
  return ept_data->ept_pointer->all;
}
191 
192 // Reads and stores all MTRRs to set a correct memory type for EPT
193 _Use_decl_annotations_ void EptInitializeMtrrEntries() {
194  PAGED_CODE();
195 
196  int index = 0;
197  MtrrData *mtrr_entries = g_eptp_mtrr_entries;
198 
199  // Get and store the default memory type
202 
203  // Read MTRR capability
204  Ia32MtrrCapabilitiesMsr mtrr_capabilities = {
207  "MTRR Default=%llu, VariableCount=%llu, FixedSupported=%llu, "
208  "FixedEnabled=%llu",
209  default_type.fields.default_mtemory_type,
210  mtrr_capabilities.fields.variable_range_count,
211  mtrr_capabilities.fields.fixed_range_supported,
212  default_type.fields.fixed_mtrrs_enabled);
213 
214  // Read fixed range MTRRs if supported
215  if (mtrr_capabilities.fields.fixed_range_supported &&
216  default_type.fields.fixed_mtrrs_enabled) {
217  static const auto k64kBase = 0x0;
218  static const auto k64kManagedSize = 0x10000;
219  static const auto k16kBase = 0x80000;
220  static const auto k16kManagedSize = 0x4000;
221  static const auto k4kBase = 0xC0000;
222  static const auto k4kManagedSize = 0x1000;
223 
224  // The kIa32MtrrFix64k00000 manages 8 ranges of memory. The first range
225  // starts at 0x0, and each range manages a 64k (0x10000) range. For example,
226  // entry[0]: 0x0 : 0x10000 - 1
227  // entry[1]: 0x10000 : 0x20000 - 1
228  // ...
229  // entry[7]: 0x70000 : 0x80000 - 1
230  ULONG64 offset = 0;
231  Ia32MtrrFixedRangeMsr fixed_range = {
233  for (auto memory_type : fixed_range.fields.types) {
234  // Each entry manages 64k (0x10000) length.
235  ULONG64 base = k64kBase + offset;
236  offset += k64kManagedSize;
237 
238  // Saves the MTRR
239  mtrr_entries[index].enabled = true;
240  mtrr_entries[index].fixedMtrr = true;
241  mtrr_entries[index].type = memory_type;
242  mtrr_entries[index].range_base = base;
243  mtrr_entries[index].range_end = base + k64kManagedSize - 1;
244  index++;
245  }
246  NT_ASSERT(k64kBase + offset == k16kBase);
247 
248  // kIa32MtrrFix16k80000 manages 8 ranges of memory. The first range starts
249  // at 0x80000, and each range manages a 16k (0x4000) range. For example,
250  // entry[0]: 0x80000 : 0x84000 - 1
251  // entry[1]: 0x88000 : 0x8C000 - 1
252  // ...
253  // entry[7]: 0x9C000 : 0xA0000 - 1
254  // Also, subsequent memory ranges are managed by other MSR,
255  // kIa32MtrrFix16kA0000, which manages 8 ranges of memory starting at
256  // 0xA0000 in the same fashion. For example,
257  // entry[0]: 0xA0000 : 0xA4000 - 1
258  // entry[1]: 0xA8000 : 0xAC000 - 1
259  // ...
260  // entry[7]: 0xBC000 : 0xC0000 - 1
261  offset = 0;
262  for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix16k80000);
263  msr <= static_cast<ULONG>(Msr::kIa32MtrrFix16kA0000); msr++) {
264  fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
265  for (auto memory_type : fixed_range.fields.types) {
266  // Each entry manages 16k (0x4000) length.
267  ULONG64 base = k16kBase + offset;
268  offset += k16kManagedSize;
269 
270  // Saves the MTRR
271  mtrr_entries[index].enabled = true;
272  mtrr_entries[index].fixedMtrr = true;
273  mtrr_entries[index].type = memory_type;
274  mtrr_entries[index].range_base = base;
275  mtrr_entries[index].range_end = base + k16kManagedSize - 1;
276  index++;
277  }
278  }
279  NT_ASSERT(k16kBase + offset == k4kBase);
280 
281  // kIa32MtrrFix4kC0000 manages 8 ranges of memory. The first range starts
282  // at 0xC0000, and each range manages a 4k (0x1000) range. For example,
283  // entry[0]: 0xC0000 : 0xC1000 - 1
284  // entry[1]: 0xC1000 : 0xC2000 - 1
285  // ...
286  // entry[7]: 0xC7000 : 0xC8000 - 1
287  // Also, subsequent memory ranges are managed by other MSRs such as
288  // kIa32MtrrFix4kC8000, kIa32MtrrFix4kD0000, and kIa32MtrrFix4kF8000. Each
289  // MSR manages 8 ranges of memory in the same fashion up to 0x100000.
290  offset = 0;
291  for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix4kC0000);
292  msr <= static_cast<ULONG>(Msr::kIa32MtrrFix4kF8000); msr++) {
293  fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
294  for (auto memory_type : fixed_range.fields.types) {
295  // Each entry manages 4k (0x1000) length.
296  ULONG64 base = k4kBase + offset;
297  offset += k4kManagedSize;
298 
299  // Saves the MTRR
300  mtrr_entries[index].enabled = true;
301  mtrr_entries[index].fixedMtrr = true;
302  mtrr_entries[index].type = memory_type;
303  mtrr_entries[index].range_base = base;
304  mtrr_entries[index].range_end = base + k4kManagedSize - 1;
305  index++;
306  }
307  }
308  NT_ASSERT(k4kBase + offset == 0x100000);
309  }
310 
311  // Read all variable range MTRRs
312  for (auto i = 0; i < mtrr_capabilities.fields.variable_range_count; i++) {
313  // Read MTRR mask and check if it is in use
314  const auto phy_mask = static_cast<ULONG>(Msr::kIa32MtrrPhysMaskN) + i * 2;
315  Ia32MtrrPhysMaskMsr mtrr_mask = {UtilReadMsr64(static_cast<Msr>(phy_mask))};
316  if (!mtrr_mask.fields.valid) {
317  continue;
318  }
319 
320  // Get a length this MTRR manages
321  ULONG length;
322  BitScanForward64(&length, mtrr_mask.fields.phys_mask * PAGE_SIZE);
323 
324  // Read MTRR base and calculate a range this MTRR manages
325  const auto phy_base = static_cast<ULONG>(Msr::kIa32MtrrPhysBaseN) + i * 2;
326  Ia32MtrrPhysBaseMsr mtrr_base = {UtilReadMsr64(static_cast<Msr>(phy_base))};
327  ULONG64 base = mtrr_base.fields.phys_base * PAGE_SIZE;
328  ULONG64 end = base + (1ull << length) - 1;
329 
330  // Save it
331  mtrr_entries[index].enabled = true;
332  mtrr_entries[index].fixedMtrr = false;
333  mtrr_entries[index].type = mtrr_base.fields.type;
334  mtrr_entries[index].range_base = base;
335  mtrr_entries[index].range_end = end;
336  index++;
337  }
338 }
339 
// Returns a memory type based on MTRRs.
// Scans g_eptp_mtrr_entries for entries covering physical_address and applies
// the "MTRR Precedences" rules: a fixed MTRR wins outright, UC wins among
// variable MTRRs, WT beats WB on overlap, otherwise the last match is used.
// Falls back to g_eptp_mtrr_default_type when no entry matches.
_Use_decl_annotations_ static memory_type EptpGetMemoryType(
    ULONG64 physical_address) {
  // Indicate that MTRR is not defined (as a default)
  UCHAR result_type = MAXUCHAR;

  // Looks for MTRR that includes the specified physical_address
  for (const auto mtrr_entry : g_eptp_mtrr_entries) {
    if (!mtrr_entry.enabled) {
      // Reached out the end of stored MTRRs
      break;
    }

    if (!UtilIsInBounds(physical_address, mtrr_entry.range_base,
                        mtrr_entry.range_end)) {
      // This MTRR does not describe a memory type of the physical_address
      continue;
    }

    // See: MTRR Precedences
    if (mtrr_entry.fixedMtrr) {
      // If a fixed MTRR describes a memory type, it is priority
      result_type = mtrr_entry.type;
      break;
    }

    if (mtrr_entry.type == static_cast<UCHAR>(memory_type::kUncacheable)) {
      // If a memory type is UC, it is priority. Do not continue to search as
      // UC has the highest priority
      result_type = mtrr_entry.type;
      break;
    }

    if (result_type == static_cast<UCHAR>(memory_type::kWriteThrough) ||
        mtrr_entry.type == static_cast<UCHAR>(memory_type::kWriteThrough)) {
      if (result_type == static_cast<UCHAR>(memory_type::kWriteBack)) {
        // If two or more MTRRs describes an over-wrapped memory region, and
        // one is WT and the other one is WB, use WT. However, look for other
        // MTRRs, as the other MTRR specifies the memory address as UC, which
        // is priority.
        result_type = static_cast<UCHAR>(memory_type::kWriteThrough);
        continue;
      }
    }

    // Otherwise, processor behavior is undefined. We just use the last MTRR
    // describes the memory address.
    result_type = mtrr_entry.type;
  }

  // Use the default MTRR if no MTRR entry is found
  if (result_type == MAXUCHAR) {
    result_type = g_eptp_mtrr_default_type;
  }

  return static_cast<memory_type>(result_type);
}
397 
398 // Builds EPT, allocates pre-allocated entires, initializes and returns EptData
399 _Use_decl_annotations_ EptData *EptInitialization() {
400  PAGED_CODE();
401 
402  static const auto kEptPageWalkLevel = 4ul;
403 
404  // Allocate ept_data
405  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
406  NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
407  if (!ept_data) {
408  return nullptr;
409  }
410  RtlZeroMemory(ept_data, sizeof(EptData));
411 
412  // Allocate EptPointer
413  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
414  NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
415  if (!ept_poiner) {
416  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
417  return nullptr;
418  }
419  RtlZeroMemory(ept_poiner, PAGE_SIZE);
420 
421  // Allocate EPT_PML4 and initialize EptPointer
422  const auto ept_pml4 =
423  reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
424  NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
425  if (!ept_pml4) {
426  ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
427  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
428  return nullptr;
429  }
430  RtlZeroMemory(ept_pml4, PAGE_SIZE);
431  ept_poiner->fields.memory_type =
432  static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
433  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
434  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));
435 
436  // Initialize all EPT entries for all physical memory pages
437  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
438  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
439  ++run_index) {
440  const auto run = &pm_ranges->run[run_index];
441  const auto base_addr = run->base_page * PAGE_SIZE;
442  for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
443  const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
444  const auto ept_pt_entry =
445  EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
446  if (!ept_pt_entry) {
447  EptpDestructTables(ept_pml4, 4);
448  ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
449  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
450  return nullptr;
451  }
452  }
453  }
454 
455  // Initialize an EPT entry for APIC_BASE. It is required to allocated it now
456  // for some reasons, or else, system hangs.
458  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
459  nullptr)) {
460  EptpDestructTables(ept_pml4, 4);
461  ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
462  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
463  return nullptr;
464  }
465 
466  // Allocate preallocated_entries
467  const auto preallocated_entries_size =
469  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
470  ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
472  if (!preallocated_entries) {
473  EptpDestructTables(ept_pml4, 4);
474  ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
475  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
476  return nullptr;
477  }
478  RtlZeroMemory(preallocated_entries, preallocated_entries_size);
479 
480  // And fill preallocated_entries with newly created entries
481  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
482  const auto ept_entry = EptpAllocateEptEntry(nullptr);
483  if (!ept_entry) {
484  EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
485  EptpDestructTables(ept_pml4, 4);
486  ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
487  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
488  return nullptr;
489  }
490  preallocated_entries[i] = ept_entry;
491  }
492 
493  // Initialization completed
494  ept_data->ept_pointer = ept_poiner;
495  ept_data->ept_pml4 = ept_pml4;
496  ept_data->preallocated_entries = preallocated_entries;
497  ept_data->preallocated_entries_count = 0;
498  return ept_data;
499 }
500 
// Allocate and initialize all EPT entries associated with the
// physical_address. Recursively descends from the given table_level (4 =
// PML4) to the PT level, allocating any missing intermediate table on the
// way. When ept_data is nullptr, new tables come from pool; otherwise from
// the pre-allocated entries (see EptpAllocateEptEntry). Returns the PT-level
// entry on success, or nullptr when an allocation failed or table_level was
// invalid.
_Use_decl_annotations_ static EptCommonEntry *EptpConstructTables(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
    EptData *ept_data) {
  switch (table_level) {
    case 4: {
      // table == PML4 (512 GB)
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        // No PDPT for this 512 GB region yet; allocate and hook it up
        const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdpt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pml4_entry, table_level, UtilPaFromVa(ept_pdpt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 3: {
      // table == PDPT (1 GB)
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        // No PDT for this 1 GB region yet; allocate and hook it up
        const auto ept_pdt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdpt_entry, table_level, UtilPaFromVa(ept_pdt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 2: {
      // table == PDT (2 MB)
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        // No PT for this 2 MB region yet; allocate and hook it up
        const auto ept_pt = EptpAllocateEptEntry(ept_data);
        if (!ept_pt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 1: {
      // table == PT (4 KB); the leaf entry must not have been set yet
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      NT_ASSERT(!ept_pt_entry->all);
      EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
      return ept_pt_entry;
    }
    default:
      // Invalid table_level
      return nullptr;
  }
}
567 
568 // Return a new EPT entry either by creating new one or from pre-allocated ones
569 _Use_decl_annotations_ static EptCommonEntry *EptpAllocateEptEntry(
570  EptData *ept_data) {
571  if (ept_data) {
572  return EptpAllocateEptEntryFromPreAllocated(ept_data);
573  } else {
575  }
576 }
577 
578 // Return a new EPT entry from pre-allocated ones.
579 _Use_decl_annotations_ static EptCommonEntry *
581  const auto count =
582  InterlockedIncrement(&ept_data->preallocated_entries_count);
583  if (count > kEptpNumberOfPreallocatedEntries) {
586  reinterpret_cast<ULONG_PTR>(ept_data), 0);
587  }
588  return ept_data->preallocated_entries[count - 1];
589 }
590 
591 // Return a new EPT entry either by creating new one
592 _Use_decl_annotations_ static EptCommonEntry *EptpAllocateEptEntryFromPool() {
593  static const auto kAllocSize = 512 * sizeof(EptCommonEntry);
594  static_assert(kAllocSize == PAGE_SIZE, "Size check");
595 
596  const auto entry = reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
597  NonPagedPool, kAllocSize, kHyperPlatformCommonPoolTag));
598  if (!entry) {
599  return entry;
600  }
601  RtlZeroMemory(entry, kAllocSize);
602  return entry;
603 }
604 
605 // Initialize an EPT entry with a "pass through" attribute
606 _Use_decl_annotations_ static void EptpInitTableEntry(
607  EptCommonEntry *entry, ULONG table_level, ULONG64 physical_address) {
608  entry->fields.read_access = true;
609  entry->fields.write_access = true;
610  entry->fields.execute_access = true;
611  entry->fields.physial_address = UtilPfnFromPa(physical_address);
612  if (table_level == 1) {
613  entry->fields.memory_type =
614  static_cast<ULONG64>(EptpGetMemoryType(physical_address));
615  }
616 }
617 
618 // Return an address of PXE
619 _Use_decl_annotations_ static ULONG64 EptpAddressToPxeIndex(
620  ULONG64 physical_address) {
621  const auto index = (physical_address >> kEptpPxiShift) & kEptpPtxMask;
622  return index;
623 }
624 
625 // Return an address of PPE
626 _Use_decl_annotations_ static ULONG64 EptpAddressToPpeIndex(
627  ULONG64 physical_address) {
628  const auto index = (physical_address >> kEptpPpiShift) & kEptpPtxMask;
629  return index;
630 }
631 
632 // Return an address of PDE
633 _Use_decl_annotations_ static ULONG64 EptpAddressToPdeIndex(
634  ULONG64 physical_address) {
635  const auto index = (physical_address >> kEptpPdiShift) & kEptpPtxMask;
636  return index;
637 }
638 
639 // Return an address of PTE
640 _Use_decl_annotations_ static ULONG64 EptpAddressToPteIndex(
641  ULONG64 physical_address) {
642  const auto index = (physical_address >> kEptpPtiShift) & kEptpPtxMask;
643  return index;
644 }
645 
646 // Deal with EPT violation VM-exit.
647 _Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
648  const EptViolationQualification exit_qualification = {
650 
651  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
652  const auto fault_va = reinterpret_cast<void *>(
653  exit_qualification.fields.valid_guest_linear_address
655  : 0);
656 
657  if (exit_qualification.fields.ept_readable ||
658  exit_qualification.fields.ept_writeable ||
659  exit_qualification.fields.ept_executable) {
661  HYPERPLATFORM_LOG_ERROR_SAFE("[UNK1] VA = %p, PA = %016llx", fault_va,
662  fault_pa);
663  return;
664  }
665 
666  const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
667  if (ept_entry && ept_entry->all) {
669  HYPERPLATFORM_LOG_ERROR_SAFE("[UNK2] VA = %p, PA = %016llx", fault_va,
670  fault_pa);
671  return;
672  }
673 
674  // EPT entry miss. It should be device memory.
676  if (!IsReleaseBuild()) {
677  NT_VERIFY(EptpIsDeviceMemory(fault_pa));
678  }
679  EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);
680 
682 }
683 
684 // Returns if the physical_address is device memory (which could not have a
685 // corresponding PFN entry)
686 _Use_decl_annotations_ static bool EptpIsDeviceMemory(
687  ULONG64 physical_address) {
688  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
689  for (auto i = 0ul; i < pm_ranges->number_of_runs; ++i) {
690  const auto current_run = &pm_ranges->run[i];
691  const auto base_addr =
692  static_cast<ULONG64>(current_run->base_page) * PAGE_SIZE;
693  const auto endAddr = base_addr + current_run->page_count * PAGE_SIZE - 1;
694  if (UtilIsInBounds(physical_address, base_addr, endAddr)) {
695  return false;
696  }
697  }
698  return true;
699 }
700 
// Returns an EPT entry corresponds to the physical_address.
// Thin public wrapper that walks from the PML4 (level 4) stored in ept_data.
_Use_decl_annotations_ EptCommonEntry *EptGetEptPtEntry(
    EptData *ept_data, ULONG64 physical_address) {
  return EptpGetEptPtEntry(ept_data->ept_pml4, 4, physical_address);
}
706 
707 // Returns an EPT entry corresponds to the physical_address
708 _Use_decl_annotations_ static EptCommonEntry *EptpGetEptPtEntry(
709  EptCommonEntry *table, ULONG table_level, ULONG64 physical_address) {
710  if (!table) {
711  return nullptr;
712  }
713  switch (table_level) {
714  case 4: {
715  // table == PML4
716  const auto pxe_index = EptpAddressToPxeIndex(physical_address);
717  const auto ept_pml4_entry = &table[pxe_index];
718  if (!ept_pml4_entry->all) {
719  return nullptr;
720  }
721  return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
722  ept_pml4_entry->fields.physial_address)),
723  table_level - 1, physical_address);
724  }
725  case 3: {
726  // table == PDPT
727  const auto ppe_index = EptpAddressToPpeIndex(physical_address);
728  const auto ept_pdpt_entry = &table[ppe_index];
729  if (!ept_pdpt_entry->all) {
730  return nullptr;
731  }
732  return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
733  ept_pdpt_entry->fields.physial_address)),
734  table_level - 1, physical_address);
735  }
736  case 2: {
737  // table == PDT
738  const auto pde_index = EptpAddressToPdeIndex(physical_address);
739  const auto ept_pdt_entry = &table[pde_index];
740  if (!ept_pdt_entry->all) {
741  return nullptr;
742  }
743  return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(
744  ept_pdt_entry->fields.physial_address)),
745  table_level - 1, physical_address);
746  }
747  case 1: {
748  // table == PT
749  const auto pte_index = EptpAddressToPteIndex(physical_address);
750  const auto ept_pt_entry = &table[pte_index];
751  return ept_pt_entry;
752  }
753  default:
755  return nullptr;
756  }
757 }
758 
759 // Frees all EPT stuff
760 _Use_decl_annotations_ void EptTermination(EptData *ept_data) {
761  HYPERPLATFORM_LOG_DEBUG("Used pre-allocated entries = %2d / %2d",
762  ept_data->preallocated_entries_count,
764 
766  ept_data->preallocated_entries_count);
767  EptpDestructTables(ept_data->ept_pml4, 4);
768  ExFreePoolWithTag(ept_data->ept_pointer, kHyperPlatformCommonPoolTag);
769  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
770 }
771 
// Frees all unused pre-allocated EPT entries. Other used entries should be
// freed with EptpDestructTables().
// Entries [0, used_count) are in use; the scan stops early at the first null
// slot (slots are filled front-to-back). Finally frees the array itself.
_Use_decl_annotations_ static void EptpFreeUnusedPreAllocatedEntries(
    EptCommonEntry **preallocated_entries, long used_count) {
  for (auto i = used_count; i < kEptpNumberOfPreallocatedEntries; ++i) {
    if (!preallocated_entries[i]) {
      // No more entries were allocated past this point
      break;
    }
#pragma warning(push)
#pragma warning(disable : 6001)
    ExFreePoolWithTag(preallocated_entries[i], kHyperPlatformCommonPoolTag);
#pragma warning(pop)
  }
  ExFreePoolWithTag(preallocated_entries, kHyperPlatformCommonPoolTag);
}
787 
// Frees all used EPT entries by walking through whole EPT.
// Recurses from the given level (4 = PML4) down to PTs; a non-zero
// physial_address field marks an entry as pointing to a sub-table. Each
// table (512 entries) is freed after its children.
_Use_decl_annotations_ static void EptpDestructTables(EptCommonEntry *table,
                                                      ULONG table_level) {
  for (auto i = 0ul; i < 512; ++i) {
    const auto entry = table[i];
    if (entry.fields.physial_address) {
      const auto sub_table = reinterpret_cast<EptCommonEntry *>(
          UtilVaFromPfn(entry.fields.physial_address));

      switch (table_level) {
        case 4:  // table == PML4, sub_table == PDPT
        case 3:  // table == PDPT, sub_table == PDT
          EptpDestructTables(sub_table, table_level - 1);
          break;
        case 2:  // table == PDT, sub_table == PT
          ExFreePoolWithTag(sub_table, kHyperPlatformCommonPoolTag);
          break;
        default:
          // Unexpected table_level; nothing to free
          break;
      }
    }
  }
  ExFreePoolWithTag(table, kHyperPlatformCommonPoolTag);
}
813 
814 } // extern "C"
static memory_type EptpGetMemoryType(_In_ ULONG64 physical_address)
struct Ia32VmxEptVpidCapMsr::@31 fields
memory_type
See: Memory Types That Can Be Encoded With PAT Memory Types Recommended for VMCS and Related Data Str...
Definition: ia32_type.h:955
ULONG64 physial_address
[12:48-1]
Definition: ept.h:40
static EptCommonEntry * EptpAllocateEptEntryFromPreAllocated(_In_ EptData *ept_data)
#define HYPERPLATFORM_LOG_DEBUG(format,...)
Logs a message as respective severity.
Definition: log.h:34
static ULONG64 EptpAddressToPxeIndex(_In_ ULONG64 physical_address)
struct EptCommonEntry::@0 fields
unsigned support_single_context_retaining_globals_invvpid
[43]
Definition: ia32_type.h:1212
ULONG64 memory_type
[3:5]
Definition: ept.h:38
See: Extended-Page-Table Pointer (EPTP)
Definition: ia32_type.h:1419
#define HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE()
Definition: performance.h:30
EptCommonEntry * EptGetEptPtEntry(EptData *ept_data, ULONG64 physical_address)
Definition: ept.cpp:702
static MtrrData g_eptp_mtrr_entries[kEptpMtrrEntriesSize]
Definition: ept.cpp:154
#define HYPERPLATFORM_COMMON_BUG_CHECK(hp_bug_check_code, param1, param2, param3)
Issues a bug check.
Definition: common.h:69
static const ULONG kHyperPlatformCommonPoolTag
A pool tag.
Definition: common.h:93
Declares interfaces to performance measurement functions.
EptPointer * ept_pointer
Definition: ept.cpp:87
#define HYPERPLATFORM_COMMON_DBG_BREAK()
Sets a break point that works only when a debugger is present.
Definition: common.h:55
static const auto kEptpNumberOfPreallocatedEntries
Definition: ept.cpp:52
ULONG64 apic_base
[12:35]
Definition: ia32_type.h:570
static UCHAR g_eptp_mtrr_default_type
Definition: ept.cpp:155
const PhysicalMemoryDescriptor * UtilGetPhysicalMemoryRanges()
Returns ranges of physical memory on the system.
Definition: util.cpp:403
See: VPID AND EPT CAPABILITIES.
Definition: ia32_type.h:1187
unsigned support_single_context_invvpid
[41]
Definition: ia32_type.h:1210
struct Ia32ApicBaseMsr::@19 fields
struct Ia32MtrrPhysMaskMsr::@18 fields
unsigned support_page_walk_length4
[6]
Definition: ia32_type.h:1192
unsigned support_all_context_invvpid
[42]
Definition: ia32_type.h:1211
static EptCommonEntry * EptpGetEptPtEntry(_In_ EptCommonEntry *table, _In_ ULONG table_level, _In_ ULONG64 physical_address)
static ULONG64 EptpAddressToPdeIndex(_In_ ULONG64 physical_address)
struct Ia32MtrrFixedRangeMsr::@16 fields
ULONG64 UtilVmRead64(VmcsField field)
Definition: util.cpp:718
See: IA32_MTRR_PHYSBASEn and IA32_MTRR_PHYSMASKn Variable-Range Register Pair.
Definition: ia32_type.h:539
Declares interfaces to EPT functions.
void * UtilVaFromPfn(PFN_NUMBER pfn)
Definition: util.cpp:618
VmxStatus UtilInveptGlobal()
Executes the INVEPT instruction and invalidates EPT entry cache.
Definition: util.cpp:787
static EptCommonEntry * EptpAllocateEptEntryFromPool()
Definition: ept.cpp:592
unsigned support_all_context_invept
[26]
Definition: ia32_type.h:1205
ULONG64 valid_guest_linear_address
[7]
Definition: ia32_type.h:1579
bool fixedMtrr
Definition: ept.cpp:75
unsigned support_single_context_invept
[25]
Definition: ia32_type.h:1204
struct Ia32MtrrPhysBaseMsr::@17 fields
See: IA32_APIC_BASE MSR Supporting x2APIC.
Definition: ia32_type.h:562
See: Exit Qualification for EPT Violations.
Definition: ia32_type.h:1569
void EptHandleEptViolation(EptData *ept_data)
Definition: ept.cpp:647
void EptTermination(EptData *ept_data)
Definition: ept.cpp:760
bool enabled
Definition: ept.cpp:74
ULONG reserverd2
Definition: ept.cpp:78
static const auto kEptpMtrrEntriesSize
Definition: ept.cpp:64
Declares interfaces to assembly functions.
ULONG64 valid
[11]
Definition: ia32_type.h:555
bool EptIsEptAvailable()
Checks if the system supports EPT technology sufficient enough.
Definition: ept.cpp:163
constexpr bool IsReleaseBuild()
Checks if the project is compiled as Release.
Definition: common.h:138
Declares interfaces to utility functions.
bool reserverd1
Definition: ept.cpp:77
ULONG64 type
[0:7]
Definition: ia32_type.h:542
ULONG64 UtilPaFromVa(void *va)
Definition: util.cpp:590
static EptCommonEntry * EptpAllocateEptEntry(_In_opt_ EptData *ept_data)
ULONG64 range_base
Definition: ept.cpp:79
void EptInitializeMtrrEntries()
Reads and stores all MTRRs to set a correct memory type for EPT.
Definition: ept.cpp:193
ULONG64 default_mtemory_type
Definition: ia32_type.h:520
static bool EptpIsDeviceMemory(_In_ ULONG64 physical_address)
ULONG64 range_end
Definition: ept.cpp:80
unsigned support_individual_address_invvpid
[40]
Definition: ia32_type.h:1209
ULONG64 read_access
[0]
Definition: ept.h:35
UCHAR type
Definition: ept.cpp:76
#define HYPERPLATFORM_LOG_ERROR_SAFE(format,...)
Definition: log.h:72
static const auto kEptpPtiShift
Definition: ept.cpp:45
ULONG_PTR UtilVmRead(VmcsField field)
Definition: util.cpp:705
ULONG64 execute_access
[2]
Definition: ept.h:37
ULONG64 ept_executable
[5]
Definition: ia32_type.h:1577
ULONG64 phys_mask
[12:MAXPHYADDR]
Definition: ia32_type.h:556
static const auto kEptpNumOfFixedRangeMtrrs
Definition: ept.cpp:60
static const auto kEptpPtxMask
Definition: ept.cpp:48
static const auto kEptpPdiShift
Definition: ept.cpp:42
unsigned support_invept
[20]
Definition: ia32_type.h:1201
unsigned support_invvpid
[32]
Definition: ia32_type.h:1207
ULONG64 write_access
[1]
Definition: ept.h:36
ULONG64 variable_range_count
Definition: ia32_type.h:507
struct Ia32MtrrDefaultTypeMsr::@15 fields
unsigned support_write_back_memory_type
[14]
Definition: ia32_type.h:1196
See: IA32_MTRR_DEF_TYPE MSR.
Definition: ia32_type.h:517
ULONG64 phys_base
[12:MAXPHYADDR]
Definition: ia32_type.h:544
See: IA32_MTRRCAP Register.
Definition: ia32_type.h:504
volatile long preallocated_entries_count
Definition: ept.cpp:91
Declares and implements common things across the project.
See: Fixed Range MTRRs.
Definition: ia32_type.h:529
Definition: ept.cpp:73
static const auto kEptpNumOfMaxVariableRangeMtrrs
Definition: ept.cpp:55
ULONG64 UtilReadMsr64(Msr msr)
Definition: util.cpp:772
static const auto kEptpPxiShift
Definition: ept.cpp:36
EptData * EptInitialization()
Builds EPT, allocates pre-allocated entires, initializes and returns EptData.
Definition: ept.cpp:399
static EptCommonEntry * EptpConstructTables(_In_ EptCommonEntry *table, _In_ ULONG table_level, _In_ ULONG64 physical_address, _In_opt_ EptData *ept_data)
All pre-allocated entries are used.
static ULONG64 EptpAddressToPteIndex(_In_ ULONG64 physical_address)
EptCommonEntry ** preallocated_entries
Definition: ept.cpp:90
Definition: ept.cpp:86
Declares interfaces to logging functions.
ULONG64 fixed_range_supported
Definition: ia32_type.h:508
ULONG64 fixed_mtrrs_enabled
Definition: ia32_type.h:522
constexpr bool UtilIsInBounds(_In_ const T &value, _In_ const T &min, _In_ const T &max)
Tests if value is in between min and max.
Definition: util.h:300
PFN_NUMBER UtilPfnFromPa(ULONG64 pa)
Definition: util.cpp:601
ULONG64 EptGetEptPointer(EptData *ept_data)
Definition: ept.cpp:188
struct Ia32MtrrCapabilitiesMsr::@14 fields
static void EptpFreeUnusedPreAllocatedEntries(_Pre_notnull_ __drv_freesMem(Mem) EptCommonEntry **preallocated_entries, _In_ long used_count)
ULONG64 all
Definition: ia32_type.h:1420
static void EptpDestructTables(_In_ EptCommonEntry *table, _In_ ULONG table_level)
static const auto kEptpPpiShift
Definition: ept.cpp:39
static void EptpInitTableEntry(_In_ EptCommonEntry *Entry, _In_ ULONG table_level, _In_ ULONG64 physical_address)
See: IA32_MTRR_PHYSBASEn and IA32_MTRR_PHYSMASKn Variable-Range Register Pair.
Definition: ia32_type.h:551
static ULONG64 EptpAddressToPpeIndex(_In_ ULONG64 physical_address)
struct EptViolationQualification::@47 fields
A structure made up of mutual fields across all EPT entry types.
Definition: ept.h:32
EptCommonEntry * ept_pml4
Definition: ept.cpp:88