HyperPlatform Programmer's Reference
vm.cpp
Go to the documentation of this file.
1 // Copyright (c) 2015-2018, Satoshi Tanda. All rights reserved.
2 // Use of this source code is governed by a MIT-style license that can be
3 // found in the LICENSE file.
4 
7 
8 #include "vm.h"
9 #include <limits.h>
10 #include <intrin.h>
11 #include "asm.h"
12 #include "common.h"
13 #include "ept.h"
14 #include "log.h"
15 #include "util.h"
16 #include "vmm.h"
17 
18 extern "C" {
20 //
21 // macro utilities
22 //
23 
25 //
26 // constants and macros
27 //
28 
30 //
31 // types
32 //
33 
35 //
36 // prototypes
37 //
38 
39 _IRQL_requires_max_(PASSIVE_LEVEL) static bool VmpIsVmxAvailable();
40 
41 _IRQL_requires_max_(PASSIVE_LEVEL) static NTSTATUS
42  VmpSetLockBitCallback(_In_opt_ void *context);
43 
44 _IRQL_requires_max_(
45  PASSIVE_LEVEL) static SharedProcessorData *VmpInitializeSharedData();
46 
47 _IRQL_requires_max_(PASSIVE_LEVEL) static void *VmpBuildMsrBitmap();
48 
49 _IRQL_requires_max_(PASSIVE_LEVEL) static UCHAR *VmpBuildIoBitmaps();
50 
51 _IRQL_requires_max_(PASSIVE_LEVEL) static NTSTATUS
52  VmpStartVm(_In_opt_ void *context);
53 
54 _IRQL_requires_max_(PASSIVE_LEVEL) static void VmpInitializeVm(
55  _In_ ULONG_PTR guest_stack_pointer,
56  _In_ ULONG_PTR guest_instruction_pointer, _In_opt_ void *context);
57 
58 _IRQL_requires_max_(PASSIVE_LEVEL) static bool VmpEnterVmxMode(
59  _Inout_ ProcessorData *processor_data);
60 
61 _IRQL_requires_max_(PASSIVE_LEVEL) static bool VmpInitializeVmcs(
62  _Inout_ ProcessorData *processor_data);
63 
64 _IRQL_requires_max_(PASSIVE_LEVEL) static bool VmpSetupVmcs(
65  _In_ const ProcessorData *processor_data,
66  _In_ ULONG_PTR guest_stack_pointer,
67  _In_ ULONG_PTR guest_instruction_pointer, _In_ ULONG_PTR vmm_stack_pointer);
68 
69 _IRQL_requires_max_(PASSIVE_LEVEL) static void VmpLaunchVm();
70 
71 _IRQL_requires_max_(PASSIVE_LEVEL) static ULONG
72  VmpGetSegmentAccessRight(_In_ USHORT segment_selector);
73 
74 _IRQL_requires_max_(PASSIVE_LEVEL) static ULONG_PTR
75  VmpGetSegmentBase(_In_ ULONG_PTR gdt_base, _In_ USHORT segment_selector);
76 
77 _IRQL_requires_max_(PASSIVE_LEVEL) static SegmentDescriptor
78  *VmpGetSegmentDescriptor(_In_ ULONG_PTR descriptor_table_base,
79  _In_ USHORT segment_selector);
80 
81 _IRQL_requires_max_(PASSIVE_LEVEL) static ULONG_PTR
83  _In_ const SegmentDescriptor *segment_descriptor);
84 
85 _IRQL_requires_max_(PASSIVE_LEVEL) static ULONG
86  VmpAdjustControlValue(_In_ Msr msr, _In_ ULONG requested_value);
87 
88 _IRQL_requires_max_(PASSIVE_LEVEL) static NTSTATUS
89  VmpStopVm(_In_opt_ void *context);
90 
91 _IRQL_requires_max_(PASSIVE_LEVEL) static void VmpFreeProcessorData(
92  _In_opt_ ProcessorData *processor_data);
93 
94 _IRQL_requires_max_(PASSIVE_LEVEL) static void VmpFreeSharedData(
95  _In_ ProcessorData *processor_data);
96 
97 _IRQL_requires_max_(PASSIVE_LEVEL) static bool VmpIsHyperPlatformInstalled();
98 
99 #if defined(ALLOC_PRAGMA)
100 #pragma alloc_text(PAGE, VmInitialization)
101 #pragma alloc_text(PAGE, VmTermination)
102 #pragma alloc_text(PAGE, VmpIsVmxAvailable)
103 #pragma alloc_text(PAGE, VmpSetLockBitCallback)
104 #pragma alloc_text(PAGE, VmpInitializeSharedData)
105 #pragma alloc_text(PAGE, VmpBuildMsrBitmap)
106 #pragma alloc_text(PAGE, VmpBuildIoBitmaps)
107 #pragma alloc_text(PAGE, VmpStartVm)
108 #pragma alloc_text(PAGE, VmpInitializeVm)
109 #pragma alloc_text(PAGE, VmpEnterVmxMode)
110 #pragma alloc_text(PAGE, VmpInitializeVmcs)
111 #pragma alloc_text(PAGE, VmpSetupVmcs)
112 #pragma alloc_text(PAGE, VmpLaunchVm)
113 #pragma alloc_text(PAGE, VmpGetSegmentAccessRight)
114 #pragma alloc_text(PAGE, VmpGetSegmentBase)
115 #pragma alloc_text(PAGE, VmpGetSegmentDescriptor)
116 #pragma alloc_text(PAGE, VmpGetSegmentBaseByDescriptor)
117 #pragma alloc_text(PAGE, VmpAdjustControlValue)
118 #pragma alloc_text(PAGE, VmpStopVm)
119 #pragma alloc_text(PAGE, VmpFreeProcessorData)
120 #pragma alloc_text(PAGE, VmpFreeSharedData)
121 #pragma alloc_text(PAGE, VmpIsHyperPlatformInstalled)
122 #pragma alloc_text(PAGE, VmHotplugCallback)
123 #endif
124 
126 //
127 // variables
128 //
129 
131 //
132 // implementations
133 //
134 
// Define GetSegmentLimit if it is not defined yet (it is only defined on x64)
#if !defined(GetSegmentLimit)
// Returns the limit of the segment named by \a selector via the LSL
// instruction; on x86 the WDK does not provide this helper, so wrap the
// compiler intrinsic here with the same name/signature as the x64 version.
inline ULONG GetSegmentLimit(_In_ ULONG selector) {
  return __segmentlimit(selector);
}
#endif
141 
142 // Checks if a VMM can be installed, and so, installs it
143 _Use_decl_annotations_ NTSTATUS VmInitialization() {
144  PAGED_CODE();
145 
147  return STATUS_CANCELLED;
148  }
149 
150  if (!VmpIsVmxAvailable()) {
151  return STATUS_HV_FEATURE_UNAVAILABLE;
152  }
153 
154  const auto shared_data = VmpInitializeSharedData();
155  if (!shared_data) {
156  return STATUS_MEMORY_NOT_ALLOCATED;
157  }
158 
159  // Read and store all MTRRs to set a correct memory type for EPT
161 
162  // Virtualize all processors
163  auto status = UtilForEachProcessor(VmpStartVm, shared_data);
164  if (!NT_SUCCESS(status)) {
166  return status;
167  }
168  return status;
169 }
170 
171 // Checks if the system supports virtualization
172 _Use_decl_annotations_ static bool VmpIsVmxAvailable() {
173  PAGED_CODE();
174 
175  // See: DISCOVERING SUPPORT FOR VMX
176  // If CPUID.1:ECX.VMX[bit 5]=1, then VMX operation is supported.
177  int cpu_info[4] = {};
178  __cpuid(cpu_info, 1);
179  const CpuFeaturesEcx cpu_features = {static_cast<ULONG_PTR>(cpu_info[2])};
180  if (!cpu_features.fields.vmx) {
181  HYPERPLATFORM_LOG_ERROR("VMX features are not supported.");
182  return false;
183  }
184 
185  // See: BASIC VMX INFORMATION
186  // The first processors to support VMX operation use the write-back type.
187  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
188  if (static_cast<memory_type>(vmx_basic_msr.fields.memory_type) !=
190  HYPERPLATFORM_LOG_ERROR("Write-back cache type is not supported.");
191  return false;
192  }
193 
194  // See: ENABLING AND ENTERING VMX OPERATION
195  Ia32FeatureControlMsr vmx_feature_control = {
197  if (!vmx_feature_control.fields.lock) {
198  HYPERPLATFORM_LOG_INFO("The lock bit is clear. Attempting to set 1.");
199  const auto status = UtilForEachProcessor(VmpSetLockBitCallback, nullptr);
200  if (!NT_SUCCESS(status)) {
201  return false;
202  }
203  }
204  if (!vmx_feature_control.fields.enable_vmxon) {
205  HYPERPLATFORM_LOG_ERROR("VMX features are not enabled.");
206  return false;
207  }
208 
209  if (!EptIsEptAvailable()) {
210  HYPERPLATFORM_LOG_ERROR("EPT features are not fully supported.");
211  return false;
212  }
213  return true;
214 }
215 
216 // Sets 1 to the lock bit of the IA32_FEATURE_CONTROL MSR
217 _Use_decl_annotations_ static NTSTATUS VmpSetLockBitCallback(void *context) {
218  UNREFERENCED_PARAMETER(context);
219  PAGED_CODE();
220 
221  Ia32FeatureControlMsr vmx_feature_control = {
223  if (vmx_feature_control.fields.lock) {
224  return STATUS_SUCCESS;
225  }
226  vmx_feature_control.fields.lock = true;
227  UtilWriteMsr64(Msr::kIa32FeatureControl, vmx_feature_control.all);
228  vmx_feature_control.all = UtilReadMsr64(Msr::kIa32FeatureControl);
229  if (!vmx_feature_control.fields.lock) {
230  HYPERPLATFORM_LOG_ERROR("The lock bit is still clear.");
231  return STATUS_DEVICE_CONFIGURATION_ERROR;
232  }
233  return STATUS_SUCCESS;
234 }
235 
236 // Initialize shared processor data
237 _Use_decl_annotations_ static SharedProcessorData *VmpInitializeSharedData() {
238  PAGED_CODE();
239 
240  const auto shared_data = reinterpret_cast<SharedProcessorData *>(
241  ExAllocatePoolWithTag(NonPagedPool, sizeof(SharedProcessorData),
243  if (!shared_data) {
244  return nullptr;
245  }
246  RtlZeroMemory(shared_data, sizeof(SharedProcessorData));
247  HYPERPLATFORM_LOG_DEBUG("shared_data = %p", shared_data);
248 
249  // Setup MSR bitmap
250  shared_data->msr_bitmap = VmpBuildMsrBitmap();
251  if (!shared_data->msr_bitmap) {
252  ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
253  return nullptr;
254  }
255 
256  // Setup IO bitmaps
257  const auto io_bitmaps = VmpBuildIoBitmaps();
258  if (!io_bitmaps) {
259  ExFreePoolWithTag(shared_data->msr_bitmap, kHyperPlatformCommonPoolTag);
260  ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
261  return nullptr;
262  }
263  shared_data->io_bitmap_a = io_bitmaps;
264  shared_data->io_bitmap_b = io_bitmaps + PAGE_SIZE;
265  return shared_data;
266 }
267 
268 // Build MSR bitmap
269 _Use_decl_annotations_ static void *VmpBuildMsrBitmap() {
270  PAGED_CODE();
271 
272  const auto msr_bitmap = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE,
274  if (!msr_bitmap) {
275  return nullptr;
276  }
277  RtlZeroMemory(msr_bitmap, PAGE_SIZE);
278 
279  // Activate VM-exit for RDMSR against all MSRs
280  const auto bitmap_read_low = reinterpret_cast<UCHAR *>(msr_bitmap);
281  const auto bitmap_read_high = bitmap_read_low + 1024;
282  RtlFillMemory(bitmap_read_low, 1024, 0xff); // read 0 - 1fff
283  RtlFillMemory(bitmap_read_high, 1024, 0xff); // read c0000000 - c0001fff
284 
285  // Ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
286  RTL_BITMAP bitmap_read_low_header = {};
287  RtlInitializeBitMap(&bitmap_read_low_header,
288  reinterpret_cast<PULONG>(bitmap_read_low), 1024 * 8);
289  RtlClearBits(&bitmap_read_low_header, 0xe7, 2);
290 
291  // Checks MSRs that cause #GP from 0 to 0xfff, and ignore all of them
292  for (auto msr = 0ul; msr < 0x1000; ++msr) {
293  __try {
294  UtilReadMsr(static_cast<Msr>(msr));
295 
296 #pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER, "Catch all.");
297  } __except (EXCEPTION_EXECUTE_HANDLER) {
298  RtlClearBits(&bitmap_read_low_header, msr, 1);
299  }
300  }
301 
302  // Ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
303  RTL_BITMAP bitmap_read_high_header = {};
304  RtlInitializeBitMap(&bitmap_read_high_header,
305  reinterpret_cast<PULONG>(bitmap_read_high),
306  1024 * CHAR_BIT);
307  RtlClearBits(&bitmap_read_high_header, 0x101, 2);
308 
309  return msr_bitmap;
310 }
311 
312 // Build IO bitmaps
313 _Use_decl_annotations_ static UCHAR *VmpBuildIoBitmaps() {
314  PAGED_CODE();
315 
316  // Allocate two IO bitmaps as one contiguous 4K+4K page
317  const auto io_bitmaps = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
318  NonPagedPool, PAGE_SIZE * 2, kHyperPlatformCommonPoolTag));
319  if (!io_bitmaps) {
320  return nullptr;
321  }
322 
323  const auto io_bitmap_a = io_bitmaps; // for 0x0 - 0x7fff
324  const auto io_bitmap_b = io_bitmaps + PAGE_SIZE; // for 0x8000 - 0xffff
325  RtlFillMemory(io_bitmap_a, PAGE_SIZE, 0);
326  RtlFillMemory(io_bitmap_b, PAGE_SIZE, 0);
327 
328  // Activate VM-exit for IO port 0x10 - 0x2010 as an example
329  RTL_BITMAP bitmap_a_header = {};
330  RtlInitializeBitMap(&bitmap_a_header, reinterpret_cast<PULONG>(io_bitmap_a),
331  PAGE_SIZE * CHAR_BIT);
332  // RtlSetBits(&bitmap_a_header, 0x10, 0x2000);
333 
334  RTL_BITMAP bitmap_b_header = {};
335  RtlInitializeBitMap(&bitmap_b_header, reinterpret_cast<PULONG>(io_bitmap_b),
336  PAGE_SIZE * CHAR_BIT);
337  // RtlSetBits(&bitmap_b_header, 0, 0x8000);
338  return io_bitmaps;
339 }
340 
341 // Virtualize the current processor
342 _Use_decl_annotations_ static NTSTATUS VmpStartVm(void *context) {
343  PAGED_CODE();
344 
345  HYPERPLATFORM_LOG_INFO("Initializing VMX for the processor %lu.",
346  KeGetCurrentProcessorNumberEx(nullptr));
347  const auto ok = AsmInitializeVm(VmpInitializeVm, context);
348  NT_ASSERT(VmpIsHyperPlatformInstalled() == ok);
349  if (!ok) {
350  return STATUS_UNSUCCESSFUL;
351  }
352  HYPERPLATFORM_LOG_INFO("Initialized successfully.");
353  return STATUS_SUCCESS;
354 }
355 
356 // Allocates structures for virtualization, initializes VMCS and virtualizes
357 // the current processor
358 _Use_decl_annotations_ static void VmpInitializeVm(
359  ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
360  void *context) {
361  PAGED_CODE();
362 
363  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
364  if (!shared_data) {
365  return;
366  }
367 
368  // Allocate related structures
369  const auto processor_data =
370  reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
371  NonPagedPool, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
372  if (!processor_data) {
373  return;
374  }
375  RtlZeroMemory(processor_data, sizeof(ProcessorData));
376  processor_data->shared_data = shared_data;
377  InterlockedIncrement(&processor_data->shared_data->reference_count);
378 
379  // Set up EPT
380  processor_data->ept_data = EptInitialization();
381  if (!processor_data->ept_data) {
382  goto ReturnFalse;
383  }
384 
385  // Allocate other processor data fields
386  processor_data->vmm_stack_limit =
387  UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
388  if (!processor_data->vmm_stack_limit) {
389  goto ReturnFalse;
390  }
391  RtlZeroMemory(processor_data->vmm_stack_limit, KERNEL_STACK_SIZE);
392 
393  processor_data->vmcs_region =
394  reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
396  if (!processor_data->vmcs_region) {
397  goto ReturnFalse;
398  }
399  RtlZeroMemory(processor_data->vmcs_region, kVmxMaxVmcsSize);
400 
401  processor_data->vmxon_region =
402  reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
404  if (!processor_data->vmxon_region) {
405  goto ReturnFalse;
406  }
407  RtlZeroMemory(processor_data->vmxon_region, kVmxMaxVmcsSize);
408 
409  // Initialize stack memory for VMM like this:
410  //
411  // (High)
412  // +------------------+ <- vmm_stack_region_base (eg, AED37000)
413  // | processor_data | <- vmm_stack_data (eg, AED36FFC)
414  // +------------------+
415  // | MAXULONG_PTR | <- vmm_stack_base (initial SP)(eg, AED36FF8)
416  // +------------------+ v
417  // | | v
418  // | (VMM Stack) | v (grow)
419  // | | v
420  // +------------------+ <- vmm_stack_limit (eg, AED34000)
421  // (Low)
422  const auto vmm_stack_region_base =
423  reinterpret_cast<ULONG_PTR>(processor_data->vmm_stack_limit) +
424  KERNEL_STACK_SIZE;
425  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
426  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
427  HYPERPLATFORM_LOG_DEBUG("vmm_stack_limit = %p",
428  processor_data->vmm_stack_limit);
429  HYPERPLATFORM_LOG_DEBUG("vmm_stack_region_base = %016Ix",
430  vmm_stack_region_base);
431  HYPERPLATFORM_LOG_DEBUG("vmm_stack_data = %016Ix", vmm_stack_data);
432  HYPERPLATFORM_LOG_DEBUG("vmm_stack_base = %016Ix", vmm_stack_base);
433  HYPERPLATFORM_LOG_DEBUG("processor_data = %p stored at %016Ix",
434  processor_data, vmm_stack_data);
435  HYPERPLATFORM_LOG_DEBUG("guest_stack_pointer = %016Ix",
436  guest_stack_pointer);
437  HYPERPLATFORM_LOG_DEBUG("guest_inst_pointer = %016Ix",
438  guest_instruction_pointer);
439  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
440  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;
441 
442  // Set up VMCS
443  if (!VmpEnterVmxMode(processor_data)) {
444  goto ReturnFalse;
445  }
446  if (!VmpInitializeVmcs(processor_data)) {
447  goto ReturnFalseWithVmxOff;
448  }
449  if (!VmpSetupVmcs(processor_data, guest_stack_pointer,
450  guest_instruction_pointer, vmm_stack_base)) {
451  goto ReturnFalseWithVmxOff;
452  }
453 
454  // Do virtualize the processor
455  VmpLaunchVm();
456 
457  // Here is not be executed with successful vmlaunch. Instead, the context
458  // jumps to an address specified by guest_instruction_pointer.
459 
460 ReturnFalseWithVmxOff:;
461  __vmx_off();
462 
463 ReturnFalse:;
464  VmpFreeProcessorData(processor_data);
465 }
466 
467 // See: VMM SETUP & TEAR DOWN
468 _Use_decl_annotations_ static bool VmpEnterVmxMode(
469  ProcessorData *processor_data) {
470  PAGED_CODE();
471 
472  // Apply FIXED bits
473  // See: VMX-FIXED BITS IN CR0
474 
475  // IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
476  // Values 1 * bit of CRx is fixed to 1
477  // Values 0 1 bit of CRx is flexible
478  // Values * 0 bit of CRx is fixed to 0
479  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
480  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
481  Cr0 cr0 = {__readcr0()};
482  Cr0 cr0_original = cr0;
483  cr0.all &= cr0_fixed1.all;
484  cr0.all |= cr0_fixed0.all;
485  __writecr0(cr0.all);
486 
487  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0 = %08Ix", cr0_fixed0.all);
488  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1 = %08Ix", cr0_fixed1.all);
489  HYPERPLATFORM_LOG_DEBUG("Original CR0 = %08Ix", cr0_original.all);
490  HYPERPLATFORM_LOG_DEBUG("Fixed CR0 = %08Ix", cr0.all);
491 
492  // See: VMX-FIXED BITS IN CR4
493  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
494  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
495  Cr4 cr4 = {__readcr4()};
496  Cr4 cr4_original = cr4;
497  cr4.all &= cr4_fixed1.all;
498  cr4.all |= cr4_fixed0.all;
499  __writecr4(cr4.all);
500 
501  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0 = %08Ix", cr4_fixed0.all);
502  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1 = %08Ix", cr4_fixed1.all);
503  HYPERPLATFORM_LOG_DEBUG("Original CR4 = %08Ix", cr4_original.all);
504  HYPERPLATFORM_LOG_DEBUG("Fixed CR4 = %08Ix", cr4.all);
505 
506  // Write a VMCS revision identifier
507  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
508  processor_data->vmxon_region->revision_identifier =
509  vmx_basic_msr.fields.revision_identifier;
510 
511  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
512  if (__vmx_on(&vmxon_region_pa)) {
513  return false;
514  }
515 
516  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use
517  // of the INVEPT Instruction
520  return true;
521 }
522 
523 // See: VMM SETUP & TEAR DOWN
524 _Use_decl_annotations_ static bool VmpInitializeVmcs(
525  ProcessorData *processor_data) {
526  PAGED_CODE();
527 
528  // Write a VMCS revision identifier
529  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
530  processor_data->vmcs_region->revision_identifier =
531  vmx_basic_msr.fields.revision_identifier;
532 
533  auto vmcs_region_pa = UtilPaFromVa(processor_data->vmcs_region);
534  if (__vmx_vmclear(&vmcs_region_pa)) {
535  return false;
536  }
537  if (__vmx_vmptrld(&vmcs_region_pa)) {
538  return false;
539  }
540 
541  // The launch state of current VMCS is "clear"
542  return true;
543 }
544 
545 // See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
546 _Use_decl_annotations_ static bool VmpSetupVmcs(
547  const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
548  ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
549  PAGED_CODE();
550 
551  Gdtr gdtr = {};
552  __sgdt(&gdtr);
553 
554  Idtr idtr = {};
555  __sidt(&idtr);
556 
557  // See: Algorithms for Determining VMX Capabilities
558  const auto use_true_msrs = Ia32VmxBasicMsr{UtilReadMsr64(Msr::kIa32VmxBasic)}
559  .fields.vmx_capability_hint;
560 
561  VmxVmEntryControls vm_entryctl_requested = {};
562  vm_entryctl_requested.fields.load_debug_controls = true;
563  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
566  vm_entryctl_requested.all)};
567 
568  VmxVmExitControls vm_exitctl_requested = {};
569  vm_exitctl_requested.fields.host_address_space_size = IsX64();
572  vm_exitctl_requested.all)};
573 
574  VmxPinBasedControls vm_pinctl_requested = {};
575  VmxPinBasedControls vm_pinctl = {
578  vm_pinctl_requested.all)};
579 
580  VmxProcessorBasedControls vm_procctl_requested = {};
581  vm_procctl_requested.fields.cr3_load_exiting = true;
582  vm_procctl_requested.fields.mov_dr_exiting = true;
583  vm_procctl_requested.fields.use_io_bitmaps = true;
584  vm_procctl_requested.fields.use_msr_bitmaps = true;
585  vm_procctl_requested.fields.activate_secondary_control = true;
586  VmxProcessorBasedControls vm_procctl = {
589  vm_procctl_requested.all)};
590 
591  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
592  vm_procctl2_requested.fields.enable_ept = true;
593  vm_procctl2_requested.fields.descriptor_table_exiting = true;
594  vm_procctl2_requested.fields.enable_rdtscp = true; // for Win10
595  vm_procctl2_requested.fields.enable_vpid = true;
596  vm_procctl2_requested.fields.enable_invpcid = true; // for Win10
597  vm_procctl2_requested.fields.enable_xsaves_xstors = true; // for Win10
599  Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};
600 
601  HYPERPLATFORM_LOG_DEBUG("VmEntryControls = %08x",
602  vm_entryctl.all);
603  HYPERPLATFORM_LOG_DEBUG("VmExitControls = %08x",
604  vm_exitctl.all);
605  HYPERPLATFORM_LOG_DEBUG("PinBasedControls = %08x",
606  vm_pinctl.all);
607  HYPERPLATFORM_LOG_DEBUG("ProcessorBasedControls = %08x",
608  vm_procctl.all);
609  HYPERPLATFORM_LOG_DEBUG("SecondaryProcessorBasedControls = %08x",
610  vm_procctl2.all);
611 
612  // NOTE: Comment in any of those as needed
613  const auto exception_bitmap =
614  // 1 << InterruptionVector::kBreakpointException |
615  // 1 << InterruptionVector::kGeneralProtectionException |
616  // 1 << InterruptionVector::kPageFaultException |
617  0;
618 
619  // Set up CR0 and CR4 bitmaps
620  // - Where a bit is masked, the shadow bit appears
621  // - Where a bit is not masked, the actual bit appears
622  // VM-exit occurs when a guest modifies any of those fields
623  Cr0 cr0_mask = {};
624  Cr0 cr0_shadow = {__readcr0()};
625 
626  Cr4 cr4_mask = {};
627  Cr4 cr4_shadow = {__readcr4()};
628  // For example, when we want to hide CR4.VMXE from the guest, comment in below
629  // cr4_mask.fields.vmxe = true;
630  // cr4_shadow.fields.vmxe = false;
631 
632  // See: PDPTE Registers
633  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
634  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
635  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
636  // loaded from the address in CR3.
637  if (UtilIsX86Pae()) {
638  cr0_mask.fields.pg = true;
639  cr0_mask.fields.cd = true;
640  cr0_mask.fields.nw = true;
641  cr4_mask.fields.pae = true;
642  cr4_mask.fields.pge = true;
643  cr4_mask.fields.pse = true;
644  cr4_mask.fields.smep = true;
645  }
646 
647  // clang-format off
648  auto error = VmxStatus::kOk;
649 
650  /* 16-Bit Control Field */
651  error |= UtilVmWrite(VmcsField::kVirtualProcessorId, KeGetCurrentProcessorNumberEx(nullptr) + 1);
652 
653  /* 16-Bit Guest-State Fields */
662 
663  /* 16-Bit Host-State Fields */
664  // RPL and TI have to be 0
672 
673  /* 64-Bit Control Fields */
678 
679  /* 64-Bit Guest-State Fields */
680  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
682  if (UtilIsX86Pae()) {
683  UtilLoadPdptes(__readcr3());
684  }
685 
686  /* 32-Bit Control Fields */
688  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
689  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
690  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
691  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
692  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);
693 
694  /* 32-Bit Guest-State Fields */
714 
715  /* 32-Bit Host-State Field */
717 
718  /* Natural-Width Control Fields */
719  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
720  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
721  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, cr0_shadow.all);
722  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, cr4_shadow.all);
723 
724  /* Natural-Width Guest-State Fields */
725  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
726  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
727  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
728 #if defined(_AMD64_)
735 #else
742 #endif
747  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
748  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
749  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
750  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
753 
754  /* Natural-Width Host-State Fields */
755  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
756  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
757  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
758 #if defined(_AMD64_)
761 #else
764 #endif
766  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
767  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
770  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
771  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
772  // clang-format on
773 
774  const auto vmx_status = static_cast<VmxStatus>(error);
775  return vmx_status == VmxStatus::kOk;
776 }
777 
778 // Executes vmlaunch
779 _Use_decl_annotations_ static void VmpLaunchVm() {
780  PAGED_CODE();
781 
782  auto error_code = UtilVmRead(VmcsField::kVmInstructionError);
783  if (error_code) {
784  HYPERPLATFORM_LOG_WARN("VM_INSTRUCTION_ERROR = %Iu", error_code);
785  }
786 
787  auto vmx_status = static_cast<VmxStatus>(__vmx_vmlaunch());
788 
789  // Here should not executed with successful vmlaunch. Instead, the context
790  // jumps to an address specified by GUEST_RIP.
791  if (vmx_status == VmxStatus::kErrorWithStatus) {
793  HYPERPLATFORM_LOG_ERROR("VM_INSTRUCTION_ERROR = %Iu", error_code);
794  }
796 }
797 
798 // Returns access right of the segment specified by the SegmentSelector for VMX
799 _Use_decl_annotations_ static ULONG VmpGetSegmentAccessRight(
800  USHORT segment_selector) {
801  PAGED_CODE();
802 
803  VmxRegmentDescriptorAccessRight access_right = {};
804  if (segment_selector) {
805  const SegmentSelector ss = {segment_selector};
806  auto native_access_right = AsmLoadAccessRightsByte(ss.all);
807  native_access_right >>= 8;
808  access_right.all = static_cast<ULONG>(native_access_right);
809  access_right.fields.reserved1 = 0;
810  access_right.fields.reserved2 = 0;
811  access_right.fields.unusable = false;
812  } else {
813  access_right.fields.unusable = true;
814  }
815  return access_right.all;
816 }
817 
818 // Returns a base address of the segment specified by SegmentSelector
819 _Use_decl_annotations_ static ULONG_PTR VmpGetSegmentBase(
820  ULONG_PTR gdt_base, USHORT segment_selector) {
821  PAGED_CODE();
822 
823  const SegmentSelector ss = {segment_selector};
824  if (!ss.all) {
825  return 0;
826  }
827 
828  if (ss.fields.ti) {
829  const auto local_segment_descriptor =
831  const auto ldt_base =
832  VmpGetSegmentBaseByDescriptor(local_segment_descriptor);
833  const auto segment_descriptor =
834  VmpGetSegmentDescriptor(ldt_base, segment_selector);
835  return VmpGetSegmentBaseByDescriptor(segment_descriptor);
836  } else {
837  const auto segment_descriptor =
838  VmpGetSegmentDescriptor(gdt_base, segment_selector);
839  return VmpGetSegmentBaseByDescriptor(segment_descriptor);
840  }
841 }
842 
843 // Returns the segment descriptor corresponds to the SegmentSelector
844 _Use_decl_annotations_ static SegmentDescriptor *VmpGetSegmentDescriptor(
845  ULONG_PTR descriptor_table_base, USHORT segment_selector) {
846  PAGED_CODE();
847 
848  const SegmentSelector ss = {segment_selector};
849  return reinterpret_cast<SegmentDescriptor *>(
850  descriptor_table_base + ss.fields.index * sizeof(SegmentDescriptor));
851 }
852 
853 // Returns a base address of segment_descriptor
854 _Use_decl_annotations_ static ULONG_PTR VmpGetSegmentBaseByDescriptor(
855  const SegmentDescriptor *segment_descriptor) {
856  PAGED_CODE();
857 
858  // Calculate a 32bit base address
859  const auto base_high = segment_descriptor->fields.base_high << (6 * 4);
860  const auto base_middle = segment_descriptor->fields.base_mid << (4 * 4);
861  const auto base_low = segment_descriptor->fields.base_low;
862  ULONG_PTR base = (base_high | base_middle | base_low) & MAXULONG;
863  // Get upper 32bit of the base address if needed
864  if (IsX64() && !segment_descriptor->fields.system) {
865  auto desc64 =
866  reinterpret_cast<const SegmentDesctiptorX64 *>(segment_descriptor);
867  ULONG64 base_upper32 = desc64->base_upper32;
868  base |= (base_upper32 << 32);
869  }
870  return base;
871 }
872 
873 // Adjust the requested control value with consulting a value of related MSR
874 _Use_decl_annotations_ static ULONG VmpAdjustControlValue(
875  Msr msr, ULONG requested_value) {
876  PAGED_CODE();
877 
878  LARGE_INTEGER msr_value = {};
879  msr_value.QuadPart = UtilReadMsr64(msr);
880  auto adjusted_value = requested_value;
881 
882  // bit == 0 in high word ==> must be zero
883  adjusted_value &= msr_value.HighPart;
884  // bit == 1 in low word ==> must be one
885  adjusted_value |= msr_value.LowPart;
886  return adjusted_value;
887 }
888 
889 // Terminates VM
890 _Use_decl_annotations_ void VmTermination() {
891  PAGED_CODE();
892 
893  HYPERPLATFORM_LOG_INFO("Uninstalling VMM.");
894  auto status = UtilForEachProcessor(VmpStopVm, nullptr);
895  if (NT_SUCCESS(status)) {
896  HYPERPLATFORM_LOG_INFO("The VMM has been uninstalled.");
897  } else {
898  HYPERPLATFORM_LOG_WARN("The VMM has not been uninstalled (%08x).", status);
899  }
900  NT_ASSERT(!VmpIsHyperPlatformInstalled());
901 }
902 
903 // Stops virtualization through a hypercall and frees all related memory
904 _Use_decl_annotations_ static NTSTATUS VmpStopVm(void *context) {
905  UNREFERENCED_PARAMETER(context);
906  PAGED_CODE();
907 
908  HYPERPLATFORM_LOG_INFO("Terminating VMX for the processor %lu.",
909  KeGetCurrentProcessorNumberEx(nullptr));
910 
911  // Stop virtualization and get an address of the management structure
912  ProcessorData *processor_data = nullptr;
913  auto status = UtilVmCall(HypercallNumber::kTerminateVmm, &processor_data);
914  if (!NT_SUCCESS(status)) {
915  return status;
916  }
917 
918  // Clear CR4.VMXE, as there is no reason to leave the bit after vmxoff
919  Cr4 cr4 = {__readcr4()};
920  cr4.fields.vmxe = false;
921  __writecr4(cr4.all);
922 
923  VmpFreeProcessorData(processor_data);
924  return STATUS_SUCCESS;
925 }
926 
927 // Frees all related memory
928 _Use_decl_annotations_ static void VmpFreeProcessorData(
929  ProcessorData *processor_data) {
930  PAGED_CODE();
931 
932  if (!processor_data) {
933  return;
934  }
935  if (processor_data->vmm_stack_limit) {
936  UtilFreeContiguousMemory(processor_data->vmm_stack_limit);
937  }
938  if (processor_data->vmcs_region) {
939  ExFreePoolWithTag(processor_data->vmcs_region, kHyperPlatformCommonPoolTag);
940  }
941  if (processor_data->vmxon_region) {
942  ExFreePoolWithTag(processor_data->vmxon_region,
944  }
945  if (processor_data->ept_data) {
946  EptTermination(processor_data->ept_data);
947  }
948 
949  VmpFreeSharedData(processor_data);
950 
951  ExFreePoolWithTag(processor_data, kHyperPlatformCommonPoolTag);
952 }
953 
954 // Decrement reference count of shared data and free it if no reference
955 _Use_decl_annotations_ static void VmpFreeSharedData(
956  ProcessorData *processor_data) {
957  PAGED_CODE();
958 
959  if (!processor_data->shared_data) {
960  return;
961  }
962 
963  if (InterlockedDecrement(&processor_data->shared_data->reference_count) !=
964  0) {
965  return;
966  }
967 
968  HYPERPLATFORM_LOG_DEBUG("Freeing shared data...");
969  if (processor_data->shared_data->io_bitmap_a) {
970  ExFreePoolWithTag(processor_data->shared_data->io_bitmap_a,
972  }
973  if (processor_data->shared_data->msr_bitmap) {
974  ExFreePoolWithTag(processor_data->shared_data->msr_bitmap,
976  }
977  ExFreePoolWithTag(processor_data->shared_data, kHyperPlatformCommonPoolTag);
978 }
979 
980 // Tests if HyperPlatform is already installed
981 _Use_decl_annotations_ static bool VmpIsHyperPlatformInstalled() {
982  PAGED_CODE();
983 
984  int cpu_info[4] = {};
985  __cpuid(cpu_info, 1);
986  const CpuFeaturesEcx cpu_features = {static_cast<ULONG_PTR>(cpu_info[2])};
987  if (!cpu_features.fields.not_used) {
988  return false;
989  }
990 
991  __cpuid(cpu_info, kHyperVCpuidInterface);
992  return cpu_info[0] == 'PpyH';
993 }
994 
995 // Virtualizes the specified processor
996 _Use_decl_annotations_ NTSTATUS
997 VmHotplugCallback(const PROCESSOR_NUMBER &proc_num) {
998  PAGED_CODE();
999 
1000  // Switch to the processor 0 to get SharedProcessorData
1001  GROUP_AFFINITY affinity = {};
1002  GROUP_AFFINITY previous_affinity = {};
1003  KeSetSystemGroupAffinityThread(&affinity, &previous_affinity);
1004 
1005  SharedProcessorData *shared_data = nullptr;
1006  auto status =
1008 
1009  KeSetSystemGroupAffinityThread(&affinity, &previous_affinity);
1010 
1011  if (!NT_SUCCESS(status)) {
1012  return status;
1013  }
1014  if (!shared_data) {
1015  return STATUS_UNSUCCESSFUL;
1016  }
1017 
1018  // Switch to the newly added processor to virtualize it
1019  affinity.Group = proc_num.Group;
1020  affinity.Mask = 1ull << proc_num.Number;
1021  KeSetSystemGroupAffinityThread(&affinity, &previous_affinity);
1022 
1023  status = VmpStartVm(shared_data);
1024 
1025  KeRevertToUserGroupAffinityThread(&previous_affinity);
1026  return status;
1027 }
1028 
1029 } // extern "C"
static ULONG VmpAdjustControlValue(_In_ Msr msr, _In_ ULONG requested_value)
struct SegmentSelector::@7 fields
struct EptData * ept_data
A pointer to EPT related data.
Definition: vmm.h:42
unsigned mov_dr_exiting
[23]
Definition: ia32_type.h:1008
void * msr_bitmap
Bitmap to activate MSR I/O VM-exit.
Definition: vmm.h:31
static UCHAR * VmpBuildIoBitmaps()
Definition: vm.cpp:313
static bool VmpEnterVmxMode(_Inout_ ProcessorData *processor_data)
USHORT __stdcall AsmReadGS()
Reads GS.
ULONG_PTR all
Definition: ia32_type.h:145
#define HYPERPLATFORM_LOG_INFO(format,...)
Definition: log.h:38
#define HYPERPLATFORM_LOG_DEBUG(format,...)
Logs a message as respective severity.
Definition: log.h:34
static void VmpFreeSharedData(_In_ ProcessorData *processor_data)
unsigned int all
Definition: ia32_type.h:973
static void VmpInitializeVm(_In_ ULONG_PTR guest_stack_pointer, _In_ ULONG_PTR guest_instruction_pointer, _In_opt_ void *context)
VmxStatus UtilVmWrite(VmcsField field, ULONG_PTR field_value)
Definition: util.cpp:737
See: CONTROL REGISTERS.
Definition: ia32_type.h:122
unsigned load_debug_controls
[2]
Definition: ia32_type.h:1083
VmxStatus
Indicates a result of VMX-instructions.
Definition: util.h:55
ULONG_PTR base
Definition: ia32_type.h:225
unsigned pae
[5] Physical Address Extension
Definition: ia32_type.h:152
USHORT __stdcall AsmReadCS()
Reads CS.
NTSTATUS VmInitialization()
Virtualizes all processors.
Definition: vm.cpp:143
USHORT __stdcall AsmReadTR()
Reads STR.
struct VmControlStructure * vmcs_region
VA of a VMCS region.
Definition: vmm.h:41
unsigned __int64 all
Definition: ia32_type.h:1119
static void * VmpBuildMsrBitmap()
Definition: vm.cpp:269
unsigned unusable
[16] Segment unusable
Definition: ia32_type.h:1111
static const ULONG kHyperPlatformCommonPoolTag
A pool tag.
Definition: common.h:93
static ULONG_PTR VmpGetSegmentBaseByDescriptor(_In_ const SegmentDescriptor *segment_descriptor)
See: Virtual-Machine Control Structures & FORMAT OF THE VMCS REGION.
Definition: ia32_type.h:965
void VmTermination()
De-virtualize all processors.
Definition: vm.cpp:890
constexpr bool IsX64()
Checks if a system is x64.
Definition: common.h:128
unsigned use_msr_bitmaps
[28]
Definition: ia32_type.h:1013
#define HYPERPLATFORM_COMMON_DBG_BREAK()
Sets a break point that works only when a debugger is present.
Definition: common.h:55
unsigned vmxe
[13] Virtual Machine Extensions Enabled
Definition: ia32_type.h:159
ULONG32 not_used
[31] Always 0 (a.k.a. HypervisorPresent)
Definition: ia32_type.h:344
USHORT __stdcall AsmReadDS()
Reads DS.
static bool VmpIsVmxAvailable()
Definition: vm.cpp:172
static NTSTATUS VmpStopVm(_In_opt_ void *context)
Operation failed with extended status available.
unsigned nw
[29] Not Write-Through
Definition: ia32_type.h:136
struct SegmentDescriptor::@8 fields
See: CONTROL REGISTERS.
Definition: ia32_type.h:144
unsigned lock
[0]
Definition: ia32_type.h:1121
static ULONG_PTR VmpGetSegmentBase(_In_ ULONG_PTR gdt_base, _In_ USHORT segment_selector)
See: Guest Register State.
Definition: ia32_type.h:1099
struct Cr0::@2 fields
USHORT __stdcall AsmReadES()
Reads ES.
Operation succeeded.
ULONG GetSegmentLimit(_In_ ULONG selector)
Definition: vm.cpp:137
void __sgdt(_Out_ void *gdtr)
Writes to GDT.
Definition: asm.h:161
ULONG64 base_high
Definition: ia32_type.h:296
#define HYPERPLATFORM_LOG_WARN(format,...)
Definition: log.h:42
ULONG_PTR all
Definition: ia32_type.h:123
void * UtilAllocateContiguousMemory(SIZE_T number_of_bytes)
Definition: util.cpp:623
struct Ia32FeatureControlMsr::@27 fields
See: Definitions of Primary Processor-Based VM-Execution Controls.
Definition: ia32_type.h:987
Declares interfaces to EPT functions.
unsigned enable_vmxon
[2]
Definition: ia32_type.h:1123
unsigned pge
[7] Page Global Enable
Definition: ia32_type.h:154
See: Definitions of VM-Entry Controls.
Definition: ia32_type.h:1079
bool __stdcall AsmInitializeVm(_In_ void(*vm_initialization_routine)(_In_ ULONG_PTR, _In_ ULONG_PTR, _In_opt_ void *), _In_opt_ void *context)
A wrapper for vm_initialization_routine.
struct Ia32VmxBasicMsr::@28 fields
VmxStatus UtilInveptGlobal()
Executes the INVEPT instruction and invalidates EPT entry cache.
Definition: util.cpp:787
unsigned host_address_space_size
[9]
Definition: ia32_type.h:1061
SharedProcessorData * shared_data
Shared data.
Definition: vmm.h:38
struct VmxRegmentDescriptorAccessRight::@26 fields
void * io_bitmap_a
Bitmap to activate IO VM-exit (~ 0x7FFF)
Definition: vmm.h:32
void * vmm_stack_limit
A head of VA for VMM stack.
Definition: vmm.h:39
USHORT __stdcall AsmReadLDTR()
Reads SLDT.
struct VmxSecondaryProcessorBasedControls::@23 fields
ULONG64 base_mid
Definition: ia32_type.h:286
struct VmxProcessorBasedControls::@22 fields
void * io_bitmap_b
Bitmap to activate IO VM-exit (~ 0xffff)
Definition: vmm.h:33
Declares interfaces to VMM functions.
NTSTATUS UtilVmCall(HypercallNumber hypercall_number, void *context)
Definition: util.cpp:649
unsigned long revision_identifier
Definition: ia32_type.h:966
USHORT __stdcall AsmReadSS()
Reads SS.
unsigned pse
[4] Page Size Extensions
Definition: ia32_type.h:151
void EptTermination(EptData *ept_data)
Definition: ept.cpp:760
static void VmpFreeProcessorData(_In_opt_ ProcessorData *processor_data)
void UtilWriteMsr64(Msr msr, ULONG64 value)
Definition: util.cpp:782
unsigned smep
[20] Supervisor Mode Execution Protection Enable
Definition: ia32_type.h:165
static NTSTATUS VmpSetLockBitCallback(_In_opt_ void *context)
Declares interfaces to assembly functions.
See: Feature Information Returned in the ECX Register.
Definition: ia32_type.h:310
unsigned int all
Definition: ia32_type.h:1080
Msr
See: MODEL-SPECIFIC REGISTERS (MSRS)
Definition: ia32_type.h:576
static bool VmpSetupVmcs(_In_ const ProcessorData *processor_data, _In_ ULONG_PTR guest_stack_pointer, _In_ ULONG_PTR guest_instruction_pointer, _In_ ULONG_PTR vmm_stack_pointer)
Represents VMM related data shared across all processors.
Definition: vmm.h:29
struct VmControlStructure * vmxon_region
VA of a VMXON region.
Definition: vmm.h:40
bool EptIsEptAvailable()
Checks if the system supports EPT technology sufficient enough.
Definition: ept.cpp:163
Declares interfaces to utility functions.
ULONG64 UtilPaFromVa(void *va)
Definition: util.cpp:590
ULONG_PTR UtilReadMsr(Msr msr)
Definition: util.cpp:767
See: Definitions of Pin-Based VM-Execution Controls.
Definition: ia32_type.h:972
See: Definitions of Secondary Processor-Based VM-Execution Controls.
Definition: ia32_type.h:1022
See: Segment Selectors.
Definition: ia32_type.h:269
See: Segment Descriptor.
Definition: ia32_type.h:302
unsigned short limit
Definition: ia32_type.h:224
unsigned short ti
Table Indicator.
Definition: ia32_type.h:273
unsigned cd
[30] Cache Disable
Definition: ia32_type.h:137
See: Segment Descriptor.
Definition: ia32_type.h:281
USHORT __stdcall AsmReadFS()
Reads FS.
void EptInitializeMtrrEntries()
Reads and stores all MTRRs to set a correct memory type for EPT.
Definition: ept.cpp:193
void UtilLoadPdptes(ULONG_PTR cr3_value)
Definition: util.cpp:828
unsigned revision_identifier
[0:30]
Definition: ia32_type.h:1137
volatile long reference_count
Number of processors sharing this data.
Definition: vmm.h:30
unsigned int all
Definition: ia32_type.h:1056
ULONG_PTR __stdcall AsmLoadAccessRightsByte(_In_ ULONG_PTR segment_selector)
Loads access rights byte.
static ULONG VmpGetSegmentAccessRight(_In_ USHORT segment_selector)
static bool VmpInitializeVmcs(_Inout_ ProcessorData *processor_data)
ULONG64 base_low
Definition: ia32_type.h:285
NTSTATUS VmHotplugCallback(const PROCESSOR_NUMBER &proc_num)
Virtualizes the specified processor.
Definition: vm.cpp:997
static bool VmpIsHyperPlatformInstalled()
Definition: vm.cpp:981
unsigned memory_type
[50:53]
Definition: ia32_type.h:1144
VmxStatus UtilInvvpidAllContext()
Executes the INVVPID instruction (type 2)
Definition: util.cpp:812
unsigned use_io_bitmaps
[25]
Definition: ia32_type.h:1010
ULONG_PTR UtilVmRead(VmcsField field)
Definition: util.cpp:705
bool UtilIsX86Pae()
Checks if the system is a PAE-enabled x86 system.
Definition: util.cpp:512
void __stdcall AsmVmmEntryPoint()
An entry point of VMM where gets called whenever VM-exit occurred.
static const ULONG32 kHyperVCpuidInterface
A majority of modern hypervisors expose their signatures through CPUID with this CPUID function code ...
Definition: ia32_type.h:29
unsigned cr3_load_exiting
[15]
Definition: ia32_type.h:1001
See: BASIC VMX INFORMATION.
Definition: ia32_type.h:1134
See: ARCHITECTURAL MSRS.
Definition: ia32_type.h:1118
void UtilFreeContiguousMemory(void *base_address)
Definition: util.cpp:644
static SegmentDescriptor * VmpGetSegmentDescriptor(_In_ ULONG_PTR descriptor_table_base, _In_ USHORT segment_selector)
struct CpuFeaturesEcx::@9 fields
static SharedProcessorData * VmpInitializeSharedData()
Definition: vm.cpp:237
struct VmxVmExitControls::@24 fields
unsigned pg
[31] Paging Enabled
Definition: ia32_type.h:138
Declares and implements common things across the project.
unsigned activate_secondary_control
[31]
Definition: ia32_type.h:1016
Declares interfaces to VMM initialization functions.
unsigned ia32e_mode_guest
[9]
Definition: ia32_type.h:1085
ULONG64 UtilReadMsr64(Msr msr)
Definition: util.cpp:772
ULONG32 vmx
[5] Virtual Machine Technology
Definition: ia32_type.h:318
See: MEMORY-MANAGEMENT REGISTERS.
Definition: ia32_type.h:223
EptData * EptInitialization()
Builds EPT, allocates pre-allocated entires, initializes and returns EptData.
Definition: ept.cpp:399
static const SIZE_T kVmxMaxVmcsSize
See: OVERVIEW.
Definition: ia32_type.h:24
unsigned short all
Definition: ia32_type.h:270
#define HYPERPLATFORM_LOG_ERROR(format,...)
Definition: log.h:46
Declares interfaces to logging functions.
static NTSTATUS VmpStartVm(_In_opt_ void *context)
static void VmpLaunchVm()
Definition: vm.cpp:779
See: Definitions of VM-Exit Controls.
Definition: ia32_type.h:1055
ULONG64 EptGetEptPointer(EptData *ept_data)
Definition: ept.cpp:188
struct VmxVmEntryControls::@25 fields
NTSTATUS UtilForEachProcessor(NTSTATUS(*callback_routine)(void *), void *context)
Definition: util.cpp:412
Represents VMM related data associated with each processor.
Definition: vmm.h:37
unsigned short index
Definition: ia32_type.h:274
struct Cr4::@3 fields
VmxStatus UtilVmWrite64(VmcsField field, ULONG64 field_value)
Definition: util.cpp:744