HyperPlatform Programmer's Reference
vmm.cpp — implements the VMM: the high-level VM-exit handler and per-exit-reason emulation routines.
1 // Copyright (c) 2015-2018, Satoshi Tanda. All rights reserved.
2 // Use of this source code is governed by a MIT-style license that can be
3 // found in the LICENSE file.
4 
7 
8 #include "vmm.h"
9 #include <intrin.h>
10 #include "asm.h"
11 #include "common.h"
12 #include "ept.h"
13 #include "log.h"
14 #include "util.h"
15 #include "performance.h"
16 
17 extern "C" {
19 //
20 // macro utilities
21 //
22 
24 //
25 // constants and macros
26 //
27 
28 // Whether VM-exit recording is enabled
29 static const bool kVmmpEnableRecordVmExit = false;
30 
31 // How many events should be recorded per a processor
32 static const long kVmmpNumberOfRecords = 100;
33 
34 // How many processors are supported for recording
35 static const long kVmmpNumberOfProcessors = 2;
36 
38 //
39 // types
40 //
41 
42 // Represents raw structure of stack of VMM when VmmVmExitHandler() is called
45  ULONG_PTR reserved;
47 };
48 
49 // Things need to be read and written by each VM-exit handler
50 struct GuestContext {
51  union {
54  };
56  ULONG_PTR ip;
57  ULONG_PTR cr8;
58  KIRQL irql;
60 };
61 #if defined(_AMD64_)
62 static_assert(sizeof(GuestContext) == 40, "Size check");
63 #else
64 static_assert(sizeof(GuestContext) == 20, "Size check");
65 #endif
66 
67 // Context at the moment of vmexit
68 struct VmExitHistory {
70  ULONG_PTR ip;
72  ULONG_PTR exit_qualification;
73  ULONG_PTR instruction_info;
74 };
75 
77 //
78 // prototypes
79 //
80 
81 bool __stdcall VmmVmExitHandler(_Inout_ VmmInitialStack *stack);
82 
83 DECLSPEC_NORETURN void __stdcall VmmVmxFailureHandler(
84  _Inout_ AllRegisters *all_regs);
85 
86 static void VmmpHandleVmExit(_Inout_ GuestContext *guest_context);
87 
88 DECLSPEC_NORETURN static void VmmpHandleTripleFault(
89  _Inout_ GuestContext *guest_context);
90 
91 DECLSPEC_NORETURN static void VmmpHandleUnexpectedExit(
92  _Inout_ GuestContext *guest_context);
93 
94 static void VmmpHandleMonitorTrap(_Inout_ GuestContext *guest_context);
95 
96 static void VmmpHandleException(_Inout_ GuestContext *guest_context);
97 
98 static void VmmpHandleCpuid(_Inout_ GuestContext *guest_context);
99 
100 static void VmmpHandleRdtsc(_Inout_ GuestContext *guest_context);
101 
102 static void VmmpHandleRdtscp(_Inout_ GuestContext *guest_context);
103 
104 static void VmmpHandleXsetbv(_Inout_ GuestContext *guest_context);
105 
106 static void VmmpHandleMsrReadAccess(_Inout_ GuestContext *guest_context);
107 
108 static void VmmpHandleMsrWriteAccess(_Inout_ GuestContext *guest_context);
109 
110 static void VmmpHandleMsrAccess(_Inout_ GuestContext *guest_context,
111  _In_ bool read_access);
112 
113 static void VmmpHandleGdtrOrIdtrAccess(_Inout_ GuestContext *guest_context);
114 
115 static void VmmpHandleLdtrOrTrAccess(_Inout_ GuestContext *guest_context);
116 
117 static void VmmpHandleDrAccess(_Inout_ GuestContext *guest_context);
118 
119 static void VmmpHandleIoPort(_Inout_ GuestContext *guest_context);
120 
121 static void VmmpHandleCrAccess(_Inout_ GuestContext *guest_context);
122 
123 static void VmmpHandleVmx(_Inout_ GuestContext *guest_context);
124 
125 static void VmmpHandleVmCall(_Inout_ GuestContext *guest_context);
126 
128  _Inout_ GuestContext *guest_context);
129 
130 static void VmmpHandleInvalidateTlbEntry(_Inout_ GuestContext *guest_context);
131 
132 static void VmmpHandleEptViolation(_Inout_ GuestContext *guest_context);
133 
134 static void VmmpHandleEptMisconfig(_Inout_ GuestContext *guest_context);
135 
136 static ULONG_PTR *VmmpSelectRegister(_In_ ULONG index,
137  _In_ GuestContext *guest_context);
138 
139 static void VmmpDumpGuestState();
140 
141 static void VmmpAdjustGuestInstructionPointer(_In_ GuestContext *guest_context);
142 
143 static void VmmpIoWrapper(_In_ bool to_memory, _In_ bool is_string,
144  _In_ SIZE_T size_of_access, _In_ unsigned short port,
145  _Inout_ void *address, _In_ unsigned long count);
146 
147 static void VmmpIndicateSuccessfulVmcall(_In_ GuestContext *guest_context);
148 
149 static void VmmpIndicateUnsuccessfulVmcall(_In_ GuestContext *guest_context);
150 
151 static void VmmpHandleVmCallTermination(_In_ GuestContext *guest_context,
152  _Inout_ void *context);
153 
154 static UCHAR VmmpGetGuestCpl();
155 
156 static void VmmpInjectInterruption(_In_ InterruptionType interruption_type,
157  _In_ InterruptionVector vector,
158  _In_ bool deliver_error_code,
159  _In_ ULONG32 error_code);
160 
161 static ULONG_PTR VmmpGetKernelCr3();
162 
164 //
165 // variables
166 //
167 
168 // Those variables are all for diagnostic purpose
172 
174 //
175 // implementations
176 //
177 
178 // A high level VMX handler called from AsmVmExitHandler().
179 // Return true for vmresume, or return false for vmxoff.
180 #pragma warning(push)
181 #pragma warning(disable : 28167)
182 _Use_decl_annotations_ bool __stdcall VmmVmExitHandler(VmmInitialStack *stack) {
183  // Save guest's context and raise IRQL as quick as possible
184  const auto guest_irql = KeGetCurrentIrql();
185  const auto guest_cr8 = IsX64() ? __readcr8() : 0;
186  if (guest_irql < DISPATCH_LEVEL) {
187  KeRaiseIrqlToDpcLevel();
188  }
189  NT_ASSERT(stack->reserved == MAXULONG_PTR);
190 
191  // Capture the current guest state
192  GuestContext guest_context = {stack,
195  guest_cr8,
196  guest_irql,
197  true};
198  guest_context.gp_regs->sp = UtilVmRead(VmcsField::kGuestRsp);
199 
200  // Dispatch the current VM-exit event
201  VmmpHandleVmExit(&guest_context);
202 
203  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use
204  // of the INVEPT Instruction
205  if (!guest_context.vm_continue) {
208  }
209 
210  // Restore guest's context
211  if (guest_context.irql < DISPATCH_LEVEL) {
212  KeLowerIrql(guest_context.irql);
213  }
214 
215  // Apply possibly updated CR8 by the handler
216  if (IsX64()) {
217  __writecr8(guest_context.cr8);
218  }
219  return guest_context.vm_continue;
220 }
221 #pragma warning(pop)
222 
223 // Dispatches VM-exit to a corresponding handler
224 _Use_decl_annotations_ static void VmmpHandleVmExit(
225  GuestContext *guest_context) {
227 
228  const VmExitInformation exit_reason = {
229  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitReason))};
230 
232  // Save them for ease of trouble shooting
233  const auto processor = KeGetCurrentProcessorNumberEx(nullptr);
234  auto &index = g_vmmp_next_history_index[processor];
235  auto &history = g_vmmp_vm_exit_history[processor][index];
236 
237  history.gp_regs = *guest_context->gp_regs;
238  history.ip = guest_context->ip;
239  history.exit_reason = exit_reason;
240  history.exit_qualification = UtilVmRead(VmcsField::kExitQualification);
241  history.instruction_info = UtilVmRead(VmcsField::kVmxInstructionInfo);
242  if (++index == kVmmpNumberOfRecords) {
243  index = 0;
244  }
245  }
246 
247  switch (exit_reason.fields.reason) {
249  VmmpHandleException(guest_context);
250  break;
252  VmmpHandleTripleFault(guest_context);
253  break;
255  VmmpHandleCpuid(guest_context);
256  break;
258  VmmpHandleInvalidateInternalCaches(guest_context);
259  break;
261  VmmpHandleInvalidateTlbEntry(guest_context);
262  break;
264  VmmpHandleRdtsc(guest_context);
265  break;
267  VmmpHandleCrAccess(guest_context);
268  break;
270  VmmpHandleDrAccess(guest_context);
271  break;
273  VmmpHandleIoPort(guest_context);
274  break;
276  VmmpHandleMsrReadAccess(guest_context);
277  break;
279  VmmpHandleMsrWriteAccess(guest_context);
280  break;
282  VmmpHandleMonitorTrap(guest_context);
283  break;
285  VmmpHandleGdtrOrIdtrAccess(guest_context);
286  break;
288  VmmpHandleLdtrOrTrAccess(guest_context);
289  break;
291  VmmpHandleEptViolation(guest_context);
292  break;
294  VmmpHandleEptMisconfig(guest_context);
295  break;
297  VmmpHandleVmCall(guest_context);
298  break;
308  VmmpHandleVmx(guest_context);
309  break;
311  VmmpHandleRdtscp(guest_context);
312  break;
314  VmmpHandleXsetbv(guest_context);
315  break;
316  default:
317  VmmpHandleUnexpectedExit(guest_context);
318  break;
319  }
320 }
321 
322 // Triple fault VM-exit. Fatal error.
323 _Use_decl_annotations_ static void VmmpHandleTripleFault(
324  GuestContext *guest_context) {
327  reinterpret_cast<ULONG_PTR>(guest_context),
328  guest_context->ip, 0);
329 }
330 
331 // Unexpected VM-exit. Fatal error.
332 _Use_decl_annotations_ static void VmmpHandleUnexpectedExit(
333  GuestContext *guest_context) {
335  const auto qualification = UtilVmRead(VmcsField::kExitQualification);
337  reinterpret_cast<ULONG_PTR>(guest_context),
338  guest_context->ip, qualification);
339 }
340 
341 // MTF VM-exit
342 _Use_decl_annotations_ static void VmmpHandleMonitorTrap(
343  GuestContext *guest_context) {
346  reinterpret_cast<ULONG_PTR>(guest_context),
347  guest_context->ip, 0);
348 }
349 
350 // Interrupt
351 _Use_decl_annotations_ static void VmmpHandleException(
352  GuestContext *guest_context) {
354  const VmExitInterruptionInformationField exception = {
355  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrInfo))};
356  const auto interruption_type =
357  static_cast<InterruptionType>(exception.fields.interruption_type);
358  const auto vector = static_cast<InterruptionVector>(exception.fields.vector);
359 
360  if (interruption_type == InterruptionType::kHardwareException) {
361  // Hardware exception
363  // #PF
364  const PageFaultErrorCode fault_code = {
365  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode))};
366  const auto fault_address = UtilVmRead(VmcsField::kExitQualification);
367 
368  VmmpInjectInterruption(interruption_type, vector, true, fault_code.all);
370  "GuestIp= %016Ix, #PF Fault= %016Ix Code= 0x%2x", guest_context->ip,
371  fault_address, fault_code.all);
372  AsmWriteCR2(fault_address);
373 
375  // # GP
376  const auto error_code =
377  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode));
378 
379  VmmpInjectInterruption(interruption_type, vector, true, error_code);
380  HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %016Ix, #GP Code= 0x%2x",
381  guest_context->ip, error_code);
382 
383  } else {
385  0);
386  }
387 
388  } else if (interruption_type == InterruptionType::kSoftwareException) {
389  // Software exception
391  // #BP
392  VmmpInjectInterruption(interruption_type, vector, false, 0);
393  HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %016Ix, #BP ", guest_context->ip);
395 
396  } else {
398  0);
399  }
400  } else {
402  0);
403  }
404 }
405 
406 // CPUID
407 _Use_decl_annotations_ static void VmmpHandleCpuid(
408  GuestContext *guest_context) {
410  unsigned int cpu_info[4] = {};
411  const auto function_id = static_cast<int>(guest_context->gp_regs->ax);
412  const auto sub_function_id = static_cast<int>(guest_context->gp_regs->cx);
413 
414  __cpuidex(reinterpret_cast<int *>(cpu_info), function_id, sub_function_id);
415 
416  if (function_id == 1) {
417  // Present existence of a hypervisor using the HypervisorPresent bit
418  CpuFeaturesEcx cpu_features = {static_cast<ULONG_PTR>(cpu_info[2])};
419  cpu_features.fields.not_used = true;
420  cpu_info[2] = static_cast<int>(cpu_features.all);
421  } else if (function_id == kHyperVCpuidInterface) {
422  // Leave signature of HyperPlatform onto EAX
423  cpu_info[0] = 'PpyH';
424  }
425 
426  guest_context->gp_regs->ax = cpu_info[0];
427  guest_context->gp_regs->bx = cpu_info[1];
428  guest_context->gp_regs->cx = cpu_info[2];
429  guest_context->gp_regs->dx = cpu_info[3];
430 
431  VmmpAdjustGuestInstructionPointer(guest_context);
432 }
433 
434 // RDTSC
435 _Use_decl_annotations_ static void VmmpHandleRdtsc(
436  GuestContext *guest_context) {
438  ULARGE_INTEGER tsc = {};
439  tsc.QuadPart = __rdtsc();
440  guest_context->gp_regs->dx = tsc.HighPart;
441  guest_context->gp_regs->ax = tsc.LowPart;
442 
443  VmmpAdjustGuestInstructionPointer(guest_context);
444 }
445 
446 // RDTSCP
447 _Use_decl_annotations_ static void VmmpHandleRdtscp(
448  GuestContext *guest_context) {
450  unsigned int tsc_aux = 0;
451  ULARGE_INTEGER tsc = {};
452  tsc.QuadPart = __rdtscp(&tsc_aux);
453  guest_context->gp_regs->dx = tsc.HighPart;
454  guest_context->gp_regs->ax = tsc.LowPart;
455  guest_context->gp_regs->cx = tsc_aux;
456 
457  VmmpAdjustGuestInstructionPointer(guest_context);
458 }
459 
460 // XSETBV. It is executed at the time of system resuming
461 _Use_decl_annotations_ static void VmmpHandleXsetbv(
462  GuestContext *guest_context) {
464  ULARGE_INTEGER value = {};
465  value.LowPart = static_cast<ULONG>(guest_context->gp_regs->ax);
466  value.HighPart = static_cast<ULONG>(guest_context->gp_regs->dx);
467  _xsetbv(static_cast<ULONG>(guest_context->gp_regs->cx), value.QuadPart);
468 
469  VmmpAdjustGuestInstructionPointer(guest_context);
470 }
471 
472 // RDMSR
473 _Use_decl_annotations_ static void VmmpHandleMsrReadAccess(
474  GuestContext *guest_context) {
476  VmmpHandleMsrAccess(guest_context, true);
477 }
478 
479 // WRMSR
480 _Use_decl_annotations_ static void VmmpHandleMsrWriteAccess(
481  GuestContext *guest_context) {
483  VmmpHandleMsrAccess(guest_context, false);
484 }
485 
486 // RDMSR and WRMSR
487 _Use_decl_annotations_ static void VmmpHandleMsrAccess(
488  GuestContext *guest_context, bool read_access) {
489  // Apply it for VMCS instead of a real MSR if a specified MSR is either of
490  // them.
491  const auto msr = static_cast<Msr>(guest_context->gp_regs->cx);
492 
493  bool transfer_to_vmcs = false;
494  VmcsField vmcs_field = {};
495  switch (msr) {
497  vmcs_field = VmcsField::kGuestSysenterCs;
498  transfer_to_vmcs = true;
499  break;
501  vmcs_field = VmcsField::kGuestSysenterEsp;
502  transfer_to_vmcs = true;
503  break;
505  vmcs_field = VmcsField::kGuestSysenterEip;
506  transfer_to_vmcs = true;
507  break;
508  case Msr::kIa32Debugctl:
509  vmcs_field = VmcsField::kGuestIa32Debugctl;
510  transfer_to_vmcs = true;
511  break;
512  case Msr::kIa32GsBase:
513  vmcs_field = VmcsField::kGuestGsBase;
514  transfer_to_vmcs = true;
515  break;
516  case Msr::kIa32FsBase:
517  vmcs_field = VmcsField::kGuestFsBase;
518  transfer_to_vmcs = true;
519  break;
520  default:
521  break;
522  }
523 
524  const auto is_64bit_vmcs =
527 
528  LARGE_INTEGER msr_value = {};
529  if (read_access) {
530  if (transfer_to_vmcs) {
531  if (is_64bit_vmcs) {
532  msr_value.QuadPart = UtilVmRead64(vmcs_field);
533  } else {
534  msr_value.QuadPart = UtilVmRead(vmcs_field);
535  }
536  } else {
537  msr_value.QuadPart = UtilReadMsr64(msr);
538  }
539  guest_context->gp_regs->ax = msr_value.LowPart;
540  guest_context->gp_regs->dx = msr_value.HighPart;
541  } else {
542  msr_value.LowPart = static_cast<ULONG>(guest_context->gp_regs->ax);
543  msr_value.HighPart = static_cast<ULONG>(guest_context->gp_regs->dx);
544  if (transfer_to_vmcs) {
545  if (is_64bit_vmcs) {
546  UtilVmWrite64(vmcs_field, static_cast<ULONG_PTR>(msr_value.QuadPart));
547  } else {
548  UtilVmWrite(vmcs_field, static_cast<ULONG_PTR>(msr_value.QuadPart));
549  }
550  } else {
551  UtilWriteMsr64(msr, msr_value.QuadPart);
552  }
553  }
554 
555  VmmpAdjustGuestInstructionPointer(guest_context);
556 }
557 
558 // LIDT, SIDT, LGDT and SGDT
559 _Use_decl_annotations_ static void VmmpHandleGdtrOrIdtrAccess(
560  GuestContext *guest_context) {
562  const GdtrOrIdtrInstInformation instruction_info = {
563  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};
564 
565  // Calculate an address to be used for the instruction
566  const auto displacement = UtilVmRead(VmcsField::kExitQualification);
567 
568  // Base
569  ULONG_PTR base_value = 0;
570  if (!instruction_info.fields.base_register_invalid) {
571  const auto register_used = VmmpSelectRegister(
572  instruction_info.fields.base_register, guest_context);
573  base_value = *register_used;
574  }
575 
576  // Index
577  ULONG_PTR index_value = 0;
578  if (!instruction_info.fields.index_register_invalid) {
579  const auto register_used = VmmpSelectRegister(
580  instruction_info.fields.index_register, guest_context);
581  index_value = *register_used;
582  switch (static_cast<Scaling>(instruction_info.fields.scalling)) {
583  case Scaling::kNoScaling:
584  index_value = index_value;
585  break;
586  case Scaling::kScaleBy2:
587  index_value = index_value * 2;
588  break;
589  case Scaling::kScaleBy4:
590  index_value = index_value * 4;
591  break;
592  case Scaling::kScaleBy8:
593  index_value = index_value * 8;
594  break;
595  default:
596  break;
597  }
598  }
599 
600  // clang-format off
601  ULONG_PTR segment_base = 0;
602  switch (instruction_info.fields.segment_register) {
603  case 0: segment_base = UtilVmRead(VmcsField::kGuestEsBase); break;
604  case 1: segment_base = UtilVmRead(VmcsField::kGuestCsBase); break;
605  case 2: segment_base = UtilVmRead(VmcsField::kGuestSsBase); break;
606  case 3: segment_base = UtilVmRead(VmcsField::kGuestDsBase); break;
607  case 4: segment_base = UtilVmRead(VmcsField::kGuestFsBase); break;
608  case 5: segment_base = UtilVmRead(VmcsField::kGuestGsBase); break;
609  default: HYPERPLATFORM_COMMON_DBG_BREAK(); break;
610  }
611  // clang-format on
612 
613  auto operation_address =
614  segment_base + base_value + index_value + displacement;
615  if (static_cast<AddressSize>(instruction_info.fields.address_size) ==
617  operation_address &= MAXULONG;
618  }
619 
620  // Update CR3 with that of the guest since below code is going to access
621  // memory.
622  const auto guest_cr3 = VmmpGetKernelCr3();
623  const auto vmm_cr3 = __readcr3();
624  __writecr3(guest_cr3);
625 
626  // Emulate the instruction
627  auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address);
628  switch (static_cast<GdtrOrIdtrInstructionIdentity>(
629  instruction_info.fields.instruction_identity)) {
631  descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestGdtrBase);
632  descriptor_table_reg->limit =
633  static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestGdtrLimit));
634  break;
636  descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestIdtrBase);
637  descriptor_table_reg->limit =
638  static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestIdtrLimit));
639  break;
641  UtilVmWrite(VmcsField::kGuestGdtrBase, descriptor_table_reg->base);
642  UtilVmWrite(VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit);
643  break;
645  UtilVmWrite(VmcsField::kGuestIdtrBase, descriptor_table_reg->base);
646  UtilVmWrite(VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit);
647  break;
648  }
649 
650  __writecr3(vmm_cr3);
651  VmmpAdjustGuestInstructionPointer(guest_context);
652 }
653 
654 // LLDT, LTR, SLDT, and STR
655 _Use_decl_annotations_ static void VmmpHandleLdtrOrTrAccess(
656  GuestContext *guest_context) {
658  const LdtrOrTrInstInformation instruction_info = {
659  static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};
660 
661  // Calculate an address or a register to be used for the instruction
662  const auto displacement = UtilVmRead(VmcsField::kExitQualification);
663 
664  ULONG_PTR operation_address = 0;
665  if (instruction_info.fields.register_access) {
666  // Register
667  const auto register_used =
668  VmmpSelectRegister(instruction_info.fields.register1, guest_context);
669  operation_address = reinterpret_cast<ULONG_PTR>(register_used);
670  } else {
671  // Base
672  ULONG_PTR base_value = 0;
673  if (!instruction_info.fields.base_register_invalid) {
674  const auto register_used = VmmpSelectRegister(
675  instruction_info.fields.base_register, guest_context);
676  base_value = *register_used;
677  }
678 
679  // Index
680  ULONG_PTR index_value = 0;
681  if (!instruction_info.fields.index_register_invalid) {
682  const auto register_used = VmmpSelectRegister(
683  instruction_info.fields.index_register, guest_context);
684  index_value = *register_used;
685  switch (static_cast<Scaling>(instruction_info.fields.scalling)) {
686  case Scaling::kNoScaling:
687  index_value = index_value;
688  break;
689  case Scaling::kScaleBy2:
690  index_value = index_value * 2;
691  break;
692  case Scaling::kScaleBy4:
693  index_value = index_value * 4;
694  break;
695  case Scaling::kScaleBy8:
696  index_value = index_value * 8;
697  break;
698  default:
699  break;
700  }
701  }
702 
703  // clang-format off
704  ULONG_PTR segment_base = 0;
705  switch (instruction_info.fields.segment_register) {
706  case 0: segment_base = UtilVmRead(VmcsField::kGuestEsBase); break;
707  case 1: segment_base = UtilVmRead(VmcsField::kGuestCsBase); break;
708  case 2: segment_base = UtilVmRead(VmcsField::kGuestSsBase); break;
709  case 3: segment_base = UtilVmRead(VmcsField::kGuestDsBase); break;
710  case 4: segment_base = UtilVmRead(VmcsField::kGuestFsBase); break;
711  case 5: segment_base = UtilVmRead(VmcsField::kGuestGsBase); break;
712  default: HYPERPLATFORM_COMMON_DBG_BREAK(); break;
713  }
714  // clang-format on
715 
716  operation_address = segment_base + base_value + index_value + displacement;
717  if (static_cast<AddressSize>(instruction_info.fields.address_size) ==
719  operation_address &= MAXULONG;
720  }
721  }
722 
723  // Update CR3 with that of the guest since below code is going to access
724  // memory.
725  const auto guest_cr3 = VmmpGetKernelCr3();
726  const auto vmm_cr3 = __readcr3();
727  __writecr3(guest_cr3);
728 
729  // Emulate the instruction
730  auto selector = reinterpret_cast<USHORT *>(operation_address);
731  switch (static_cast<LdtrOrTrInstructionIdentity>(
732  instruction_info.fields.instruction_identity)) {
734  *selector =
735  static_cast<USHORT>(UtilVmRead(VmcsField::kGuestLdtrSelector));
736  break;
738  *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestTrSelector));
739  break;
742  break;
745  // Set the Busy bit in TSS.
746  // See: LTR - Load Task Register
747  const SegmentSelector ss = {*selector};
748  const auto sd = reinterpret_cast<SegmentDescriptor *>(
750  ss.fields.index * sizeof(SegmentDescriptor));
751  sd->fields.type |= 2; // Set the Busy bit
752  break;
753  }
754  }
755 
756  __writecr3(vmm_cr3);
757  VmmpAdjustGuestInstructionPointer(guest_context);
758 }
759 
760 // MOV to / from DRx
761 _Use_decl_annotations_ static void VmmpHandleDrAccess(
762  GuestContext *guest_context) {
764 
765  // Normally, when the privileged instruction is executed at CPL3, #GP(0)
766  // occurs instead of VM-exit. However, access to the debug registers is
767  // exception. Inject #GP(0) in such case to emulate what the processor
768  // normally does. See: Instructions That Cause VM Exits Conditionally
769  if (VmmpGetGuestCpl() != 0) {
772  true, 0);
773  return;
774  }
775 
776  const MovDrQualification exit_qualification = {
778  auto debugl_register = exit_qualification.fields.debugl_register;
779 
780  // Access to DR4 and 5 causes #UD when CR4.DE (Debugging Extensions) is set.
781  // Otherwise, these registers are aliased to DR6 and 7 respectively.
782  // See: Debug Registers DR4 and DR5
783  if (debugl_register == 4 || debugl_register == 5) {
784  const Cr4 guest_cr4 = {UtilVmRead(VmcsField::kGuestCr4)};
785  if (guest_cr4.fields.de) {
788  0);
789  return;
790  } else if (debugl_register == 4) {
791  debugl_register = 6;
792  } else {
793  debugl_register = 7;
794  }
795  }
796 
797  // Access to any of DRs causes #DB when DR7.GD (General Detect Enable) is set.
798  // See: Debug Control Register (DR7)
799  Dr7 guest_dr7 = {UtilVmRead(VmcsField::kGuestDr7)};
800  if (guest_dr7.fields.gd) {
801  Dr6 guest_dr6 = {__readdr(6)};
802  // Clear DR6.B0-3 since the #DB being injected is not due to match of a
803  // condition specified in DR6. The processor is allowed to clear those bits
804  // as "Certain debug exceptions may clear bits 0-3."
805  guest_dr6.fields.b0 = false;
806  guest_dr6.fields.b1 = false;
807  guest_dr6.fields.b2 = false;
808  guest_dr6.fields.b3 = false;
809  // "When such a condition is detected, the BD flag in debug status register
810  // DR6 is set prior to generating the exception."
811  guest_dr6.fields.bd = true;
812  __writedr(6, guest_dr6.all);
813 
816 
817  // While the processor clears the DR7.GD bit on #DB ("The processor clears
818  // the GD flag upon entering to the debug exception handler"), it does not
819  // change that in the VMCS. Emulate that behavior here. Note that this bit
820  // should actually be cleared by intercepting #DB and in the handler instead
821  // of here, since the processor clears it on any #DB. We do not do that as
822  // we do not intercept #DB as-is.
823  guest_dr7.fields.gd = false;
825  return;
826  }
827 
828  const auto register_used =
829  VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);
830  const auto direction =
831  static_cast<MovDrDirection>(exit_qualification.fields.direction);
832 
833  // In 64-bit mode, the upper 32 bits of DR6 and DR7 are reserved and must be
834  // written with zeros. Writing 1 to any of the upper 32 bits results in a
835  // #GP(0) exception. See: Debug Registers and Intel® 64 Processors
836  if (IsX64() && direction == MovDrDirection::kMoveToDr) {
837  const auto value64 = static_cast<ULONG64>(*register_used);
838  if ((debugl_register == 6 || debugl_register == 7) && (value64 >> 32)) {
841  true, 0);
842  return;
843  }
844  }
845 
846  switch (direction) {
848  switch (debugl_register) {
849  // clang-format off
850  case 0: __writedr(0, *register_used); break;
851  case 1: __writedr(1, *register_used); break;
852  case 2: __writedr(2, *register_used); break;
853  case 3: __writedr(3, *register_used); break;
854  // clang-format on
855  case 6: {
856  // Make sure that we write 0 and 1 into the bits that are stated to be
857  // so. The Intel SDM does not appear to state what happens when the
858  // processor attempts to write 1 to the always 0 bits, and vice versa,
859  // however, observation is that writes to those bits are ignored
860  // *as long as it is done on the non-root mode*, and other hypervisors
861  // emulate in that way as well.
862  Dr6 write_value = {*register_used};
863  write_value.fields.reserved1 |= ~write_value.fields.reserved1;
864  write_value.fields.reserved2 = 0;
865  write_value.fields.reserved3 |= ~write_value.fields.reserved3;
866  __writedr(6, write_value.all);
867  break;
868  }
869  case 7: {
870  // Similar to the case of CR6, enforce always 1 and 0 behavior.
871  Dr7 write_value = {*register_used};
872  write_value.fields.reserved1 |= ~write_value.fields.reserved1;
873  write_value.fields.reserved2 = 0;
874  write_value.fields.reserved3 = 0;
875  UtilVmWrite(VmcsField::kGuestDr7, write_value.all);
876  break;
877  }
878  default:
879  break;
880  }
881  break;
883  // clang-format off
884  switch (debugl_register) {
885  case 0: *register_used = __readdr(0); break;
886  case 1: *register_used = __readdr(1); break;
887  case 2: *register_used = __readdr(2); break;
888  case 3: *register_used = __readdr(3); break;
889  case 6: *register_used = __readdr(6); break;
890  case 7: *register_used = UtilVmRead(VmcsField::kGuestDr7); break;
891  default: break;
892  }
893  // clang-format on
894  break;
895  default:
897  0);
898  break;
899  }
900 
901  VmmpAdjustGuestInstructionPointer(guest_context);
902 }
903 
904 // IN, INS, OUT, OUTS
905 _Use_decl_annotations_ static void VmmpHandleIoPort(
906  GuestContext *guest_context) {
907  const IoInstQualification exit_qualification = {
909 
910  const auto is_in = exit_qualification.fields.direction == 1; // to memory?
911  const auto is_string = exit_qualification.fields.string_instruction == 1;
912  const auto is_rep = exit_qualification.fields.rep_prefixed == 1;
913  const auto port = static_cast<USHORT>(exit_qualification.fields.port_number);
914  const auto string_address = reinterpret_cast<void *>(
915  (is_in) ? guest_context->gp_regs->di : guest_context->gp_regs->si);
916  const auto count =
917  static_cast<unsigned long>((is_rep) ? guest_context->gp_regs->cx : 1);
918  const auto address =
919  (is_string) ? string_address : &guest_context->gp_regs->ax;
920 
921  SIZE_T size_of_access = 0;
922  const char *suffix = "";
923  switch (static_cast<IoInstSizeOfAccess>(
924  exit_qualification.fields.size_of_access)) {
926  size_of_access = 1;
927  suffix = "B";
928  break;
930  size_of_access = 2;
931  suffix = "W";
932  break;
934  size_of_access = 4;
935  suffix = "D";
936  break;
937  }
938 
939  HYPERPLATFORM_LOG_DEBUG_SAFE("GuestIp= %016Ix, Port= %04x, %s%s%s",
940  guest_context->ip, port, (is_in ? "IN" : "OUT"),
941  (is_string ? "S" : ""),
942  (is_string ? suffix : ""));
943 
944  VmmpIoWrapper(is_in, is_string, size_of_access, port, address, count);
945 
946  // Update RCX, RDI and RSI accordingly. Note that this code can handle only
947  // the REP prefix.
948  if (is_string) {
949  const auto update_count = (is_rep) ? guest_context->gp_regs->cx : 1;
950  const auto update_size = update_count * size_of_access;
951  const auto update_register =
952  (is_in) ? &guest_context->gp_regs->di : &guest_context->gp_regs->si;
953 
954  if (guest_context->flag_reg.fields.df) {
955  *update_register = *update_register - update_size;
956  } else {
957  *update_register = *update_register + update_size;
958  }
959 
960  if (is_rep) {
961  guest_context->gp_regs->cx = 0;
962  }
963  }
964 
965  VmmpAdjustGuestInstructionPointer(guest_context);
966 }
967 
968 // Perform IO instruction according with parameters
969 _Use_decl_annotations_ static void VmmpIoWrapper(bool to_memory, bool is_string,
970  SIZE_T size_of_access,
971  unsigned short port,
972  void *address,
973  unsigned long count) {
974  NT_ASSERT(size_of_access == 1 || size_of_access == 2 || size_of_access == 4);
975 
976  // Update CR3 with that of the guest since below code is going to access
977  // memory.
978  const auto guest_cr3 = VmmpGetKernelCr3();
979  const auto vmm_cr3 = __readcr3();
980  __writecr3(guest_cr3);
981 
982  // clang-format off
983  if (to_memory) {
984  if (is_string) {
985  // INS
986  switch (size_of_access) {
987  case 1: __inbytestring(port, reinterpret_cast<UCHAR*>(address), count); break;
988  case 2: __inwordstring(port, reinterpret_cast<USHORT*>(address), count); break;
989  case 4: __indwordstring(port, reinterpret_cast<ULONG*>(address), count); break;
990  }
991  } else {
992  // IN
993  switch (size_of_access) {
994  case 1: *reinterpret_cast<UCHAR*>(address) = __inbyte(port); break;
995  case 2: *reinterpret_cast<USHORT*>(address) = __inword(port); break;
996  case 4: *reinterpret_cast<ULONG*>(address) = __indword(port); break;
997  }
998  }
999  } else {
1000  if (is_string) {
1001  // OUTS
1002  switch (size_of_access) {
1003  case 1: __outbytestring(port, reinterpret_cast<UCHAR*>(address), count); break;
1004  case 2: __outwordstring(port, reinterpret_cast<USHORT*>(address), count); break;
1005  case 4: __outdwordstring(port, reinterpret_cast<ULONG*>(address), count); break;
1006  }
1007  } else {
1008  // OUT
1009  switch (size_of_access) {
1010  case 1: __outbyte(port, *reinterpret_cast<UCHAR*>(address)); break;
1011  case 2: __outword(port, *reinterpret_cast<USHORT*>(address)); break;
1012  case 4: __outdword(port, *reinterpret_cast<ULONG*>(address)); break;
1013  }
1014  }
1015  }
1016  // clang-format on
1017 
1018  __writecr3(vmm_cr3);
1019 }
1020 
1021 // MOV to / from CRx
// Emulates a guest MOV to/from a control register based on the exit
// qualification, then advances the guest IP past the instruction.
// NOTE(review): this listing was extracted from a rendered (Doxygen) page;
// statements that were hyperlinks (the MovCrAccessType case labels, the
// UtilVmWrite calls for CR0/CR4, the PAE PDPTE reloads, the INVVPID call
// name, and the bug-check macros) were dropped — the jumps in the embedded
// line numbers mark the gaps. Restore from the original vmm.cpp.
1022 _Use_decl_annotations_ static void VmmpHandleCrAccess(
1023  GuestContext *guest_context) {
1025  const MovCrQualification exit_qualification = {
1027 
// Resolve the general-purpose register named by the qualification into a
// pointer into the saved guest register area.
1028  const auto register_used =
1029  VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);
1030 
1031  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
1033  switch (exit_qualification.fields.control_register) {
1034  // CR0 <- Reg
1035  case 0: {
1037  if (UtilIsX86Pae()) {
1039  }
// Mask the guest-supplied value with the VMX fixed-bit MSRs: clear bits
// that must be 0 (FIXED1) and force bits that must be 1 (FIXED0) so the
// value stays legal while in VMX operation.
1040  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
1041  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
1042  Cr0 cr0 = {*register_used};
1043  cr0.all &= cr0_fixed1.all;
1044  cr0.all |= cr0_fixed0.all;
1047  break;
1048  }
1049 
1050  // CR3 <- Reg
1051  case 3: {
1053  if (UtilIsX86Pae()) {
1054  UtilLoadPdptes(*register_used);
1055  }
1056  // Under some circumstances MOV to CR3 is not *required* to flush TLB
1057  // entries, but also NOT prohibited to do so. Therefore, we flush it
1058  // all time.
1059  // See: Operations that Invalidate TLBs and Paging-Structure Caches
// presumably an UtilInvvpid* call whose name was dropped by extraction;
// the VPID argument (processor number + 1) survives below.
1061  static_cast<USHORT>(KeGetCurrentProcessorNumberEx(nullptr) + 1));
1062 
1063  // The MOV to CR3 does not modify the bit63 of CR3. Emulate this
1064  // behavior.
1065  // See: MOV - Move to/from Control Registers
1066  UtilVmWrite(VmcsField::kGuestCr3, (*register_used & ~(1ULL << 63)));
1067  break;
1068  }
1069 
1070  // CR4 <- Reg
1071  case 4: {
1073  if (UtilIsX86Pae()) {
1075  }
// Same fixed-bit masking scheme as CR0 above, using the CR4 MSR pair.
1077  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
1078  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
1079  Cr4 cr4 = {*register_used};
1080  cr4.all &= cr4_fixed1.all;
1081  cr4.all |= cr4_fixed0.all;
1084  break;
1085  }
1086 
1087  // CR8 <- Reg
// CR8 (TPR) is not shadowed in the VMCS here; it is kept in the per-exit
// guest context and restored on VM-entry by the caller.
1088  case 8: {
1090  guest_context->cr8 = *register_used;
1091  break;
1092  }
1093 
1094  default:
1096  0, 0);
1097  break;
1098  }
1099  break;
1100 
1102  switch (exit_qualification.fields.control_register) {
1103  // Reg <- CR3
1104  case 3: {
1106  *register_used = UtilVmRead(VmcsField::kGuestCr3);
1107  break;
1108  }
1109 
1110  // Reg <- CR8
1111  case 8: {
1113  *register_used = guest_context->cr8;
1114  break;
1115  }
1116 
1117  default:
1119  0, 0);
1120  break;
1121  }
1122  break;
1123 
1124  // Unimplemented
1127  default:
1129  break;
1130  }
1131 
1132  VmmpAdjustGuestInstructionPointer(guest_context);
1133 }
1134 
1135 // VMX instructions except for VMCALL
// Fails any VMX instruction executed by the guest by reporting
// "VMfailInvalid": CF=1 with all other arithmetic flags cleared, per the
// project's CONVENTIONS, then skips the instruction.
// NOTE(review): the line writing the updated flags back to the VMCS
// (between embedded lines 1144 and 1146) was dropped by extraction —
// confirm against the original vmm.cpp.
1136 _Use_decl_annotations_ static void VmmpHandleVmx(GuestContext *guest_context) {
1138  // See: CONVENTIONS
1139  guest_context->flag_reg.fields.cf = true; // Error without status
1140  guest_context->flag_reg.fields.pf = false;
1141  guest_context->flag_reg.fields.af = false;
1142  guest_context->flag_reg.fields.zf = false; // Error without status
1143  guest_context->flag_reg.fields.sf = false;
1144  guest_context->flag_reg.fields.of = false;
1146  VmmpAdjustGuestInstructionPointer(guest_context);
1147 }
1148 
1149 // VMCALL
// Dispatches HyperPlatform hyper-calls. The hyper-call number arrives in
// ecx and an arbitrary context pointer in edx; unknown numbers are failed,
// which ultimately injects #UD into the guest.
// NOTE(review): the `case HypercallNumber::…:` labels were dropped by
// extraction (jumps at embedded lines 1161, 1169, 1174); from util.h the
// three visible bodies correspond to termination, ping, and a
// shared-data query respectively — confirm against the original vmm.cpp.
1150 _Use_decl_annotations_ static void VmmpHandleVmCall(
1151  GuestContext *guest_context) {
1152  // VMCALL convention for HyperPlatform:
1153  // ecx: hyper-call number (always 32bit)
1154  // edx: arbitrary context parameter (pointer size)
1155  // Any unsuccessful VMCALL will inject #UD into a guest
1156  const auto hypercall_number =
1157  static_cast<HypercallNumber>(guest_context->gp_regs->cx);
1158  const auto context = reinterpret_cast<void *>(guest_context->gp_regs->dx);
1159 
1160  switch (hypercall_number) {
1162  // Unloading requested. This VMCALL is allowed to execute only from CPL=0
1163  if (VmmpGetGuestCpl() == 0) {
1164  VmmpHandleVmCallTermination(guest_context, context);
1165  } else {
1166  VmmpIndicateUnsuccessfulVmcall(guest_context);
1167  }
1168  break;
1170  // Sample VMCALL handler
1171  HYPERPLATFORM_LOG_INFO_SAFE("Pong by VMM! (context = %p)", context);
1172  VmmpIndicateSuccessfulVmcall(guest_context);
1173  break;
// Stores a pointer to the shared VMM data into *context for the caller.
1175  *reinterpret_cast<void **>(context) =
1176  guest_context->stack->processor_data->shared_data;
1177  VmmpIndicateSuccessfulVmcall(guest_context);
1178  break;
1179  default:
1180  // Unsupported hypercall
1181  VmmpIndicateUnsuccessfulVmcall(guest_context);
1182  }
1183 }
1184 
1185 // INVD
// Handles the guest's INVD and advances past it.
// NOTE(review): extraction dropped the body lines between embedded lines
// 1187 and 1190 — based on the AsmInvalidateInternalCaches() prototype in
// this file's index, presumably the actual cache invalidation call;
// confirm against the original vmm.cpp.
1186 _Use_decl_annotations_ static void VmmpHandleInvalidateInternalCaches(
1187  GuestContext *guest_context) {
1190  VmmpAdjustGuestInstructionPointer(guest_context);
1191 }
1192 
1193 // INVLPG
// Handles the guest's INVLPG: performs the invalidation on behalf of the
// guest for the faulting linear address (taken from the exit
// qualification) and also invalidates the matching VPID-tagged mapping.
// NOTE(review): the call name on embedded line 1200 was dropped by
// extraction — the surviving arguments (VPID = processor number + 1, plus
// the address) match UtilInvvpidIndividualAddress(); confirm against the
// original vmm.cpp.
1194 _Use_decl_annotations_ static void VmmpHandleInvalidateTlbEntry(
1195  GuestContext *guest_context) {
1197  const auto invalidate_address =
1198  reinterpret_cast<void *>(UtilVmRead(VmcsField::kExitQualification));
1199  __invlpg(invalidate_address);
1201  static_cast<USHORT>(KeGetCurrentProcessorNumberEx(nullptr) + 1),
1202  invalidate_address);
1203  VmmpAdjustGuestInstructionPointer(guest_context);
1204 }
1205 
1206 // EXIT_REASON_EPT_VIOLATION
// Delegates EPT violation handling to the EPT module using this
// processor's EPT data. Does not advance the guest IP: the faulting access
// is re-executed after the EPT module resolves the violation.
1207 _Use_decl_annotations_ static void VmmpHandleEptViolation(
1208  GuestContext *guest_context) {
1210  auto processor_data = guest_context->stack->processor_data;
1211  EptHandleEptViolation(processor_data->ept_data);
1212 }
1213 
1214 // EXIT_REASON_EPT_MISCONFIG
// An EPT misconfiguration is unrecoverable: gathers the faulting guest
// physical address and its EPT PT entry for diagnostics and bug-checks.
// NOTE(review): the macro name on embedded line 1222 was dropped by
// extraction — the surviving argument list matches
// HYPERPLATFORM_COMMON_BUG_CHECK(); confirm against the original vmm.cpp.
1215 _Use_decl_annotations_ static void VmmpHandleEptMisconfig(
1216  GuestContext *guest_context) {
// NOTE(review): this UNREFERENCED_PARAMETER is stale — guest_context IS
// read below to obtain ept_data. Harmless, but should be removed.
1217  UNREFERENCED_PARAMETER(guest_context);
1218 
1219  const auto fault_address = UtilVmRead(VmcsField::kGuestPhysicalAddress);
1220  const auto ept_pt_entry = EptGetEptPtEntry(
1221  guest_context->stack->processor_data->ept_data, fault_address);
1223  fault_address,
1224  reinterpret_cast<ULONG_PTR>(ept_pt_entry), 0);
1225 }
1226 
1227 // Selects a register to be used based on the index
1228 _Use_decl_annotations_ static ULONG_PTR *VmmpSelectRegister(
1229  ULONG index, GuestContext *guest_context) {
1230  ULONG_PTR *register_used = nullptr;
1231  // clang-format off
1232  switch (index) {
1233  case 0: register_used = &guest_context->gp_regs->ax; break;
1234  case 1: register_used = &guest_context->gp_regs->cx; break;
1235  case 2: register_used = &guest_context->gp_regs->dx; break;
1236  case 3: register_used = &guest_context->gp_regs->bx; break;
1237  case 4: register_used = &guest_context->gp_regs->sp; break;
1238  case 5: register_used = &guest_context->gp_regs->bp; break;
1239  case 6: register_used = &guest_context->gp_regs->si; break;
1240  case 7: register_used = &guest_context->gp_regs->di; break;
1241 #if defined(_AMD64_)
1242  case 8: register_used = &guest_context->gp_regs->r8; break;
1243  case 9: register_used = &guest_context->gp_regs->r9; break;
1244  case 10: register_used = &guest_context->gp_regs->r10; break;
1245  case 11: register_used = &guest_context->gp_regs->r11; break;
1246  case 12: register_used = &guest_context->gp_regs->r12; break;
1247  case 13: register_used = &guest_context->gp_regs->r13; break;
1248  case 14: register_used = &guest_context->gp_regs->r14; break;
1249  case 15: register_used = &guest_context->gp_regs->r15; break;
1250 #endif
1251  default: HYPERPLATFORM_COMMON_DBG_BREAK(); break;
1252  }
1253  // clang-format on
1254  return register_used;
1255 }
1256 
1257 // Dumps guest state VMCS fields
// Logs guest-state VMCS fields for diagnostics.
// NOTE(review): the entire body (the logging statements between the
// clang-format markers) was dropped by extraction because every line was a
// hyperlink — restore from the original vmm.cpp; only the empty frame
// remains here.
1258 /*_Use_decl_annotations_*/ static void VmmpDumpGuestState() {
1259  // clang-format off
1268 
1270 
1290 
1294 
1301 
1312  // clang-format on
1313 }
1314 
1315 // Advances guest's IP to the next instruction
// Skips the exiting instruction by adding the VM-exit instruction length
// to the guest IP, and preserves single-step semantics: if the guest's TF
// was set, a #DB must still be delivered as if the instruction executed
// normally.
// NOTE(review): the body of the `if` (embedded lines 1323-1325, presumably
// a VmmpInjectInterruption call for #DB given the comment) was dropped by
// extraction — confirm against the original vmm.cpp.
1316 _Use_decl_annotations_ static void VmmpAdjustGuestInstructionPointer(
1317  GuestContext *guest_context) {
1318  const auto exit_inst_length = UtilVmRead(VmcsField::kVmExitInstructionLen);
1319  UtilVmWrite(VmcsField::kGuestRip, guest_context->ip + exit_inst_length);
1320 
1321  // Inject #DB if TF is set
1322  if (guest_context->flag_reg.fields.tf) {
1326  }
1327 }
1328 
1329 // Handle VMRESUME or VMXOFF failure. Fatal error.
// Called from assembly when VMRESUME/VMXOFF fails. Reads the guest IP and,
// when ZF indicates a VMfailValid, the VM-instruction error number, then
// bug-checks — this function does not return.
// NOTE(review): extraction dropped the true-branch of the conditional
// (embedded line 1335, the read of the VM-instruction error field) and the
// bug-check macro invocation (lines 1337-1338) — confirm against the
// original vmm.cpp.
1330 _Use_decl_annotations_ void __stdcall VmmVmxFailureHandler(
1331  AllRegisters *all_regs) {
1332  const auto guest_ip = UtilVmRead(VmcsField::kGuestRip);
1333  // See: VM-Instruction Error Numbers
1334  const auto vmx_error = (all_regs->flags.fields.zf)
1336  : 0;
1339  guest_ip, 0);
1340 }
1341 
1342 // Indicates successful VMCALL
// Reports VMCALL success to the guest per the project's CONVENTIONS by
// clearing all arithmetic flags, then advances past the VMCALL.
// NOTE(review): cf and zf are assigned twice (embedded lines 1346/1352 and
// 1349/1353); the duplicates are redundant but harmless. The line between
// embedded 1353 and 1355 (presumably the write-back of the updated flags
// to the VMCS) was dropped by extraction — confirm against the original
// vmm.cpp.
1343 _Use_decl_annotations_ static void VmmpIndicateSuccessfulVmcall(
1344  GuestContext *guest_context) {
1345  // See: CONVENTIONS
1346  guest_context->flag_reg.fields.cf = false;
1347  guest_context->flag_reg.fields.pf = false;
1348  guest_context->flag_reg.fields.af = false;
1349  guest_context->flag_reg.fields.zf = false;
1350  guest_context->flag_reg.fields.sf = false;
1351  guest_context->flag_reg.fields.of = false;
1352  guest_context->flag_reg.fields.cf = false;
1353  guest_context->flag_reg.fields.zf = false;
1355  VmmpAdjustGuestInstructionPointer(guest_context);
1356 }
1357 
1358 // Indicates unsuccessful VMCALL
// Fails a VMCALL by arranging event injection on the next VM-entry; the
// guest IP is NOT advanced — the instruction length field is set so the
// injected event resumes correctly past the 3-byte VMCALL.
// NOTE(review): extraction dropped the lines between embedded 1362 and
// 1365 — per the "inject #UD" convention documented in VmmpHandleVmCall,
// presumably a VmmpInjectInterruption call for #UD; confirm against the
// original vmm.cpp.
1359 _Use_decl_annotations_ static void VmmpIndicateUnsuccessfulVmcall(
1360  GuestContext *guest_context) {
1361  UNREFERENCED_PARAMETER(guest_context);
1362 
1365  UtilVmWrite(VmcsField::kVmEntryInstructionLen, 3); // VMCALL is 3 bytes
1366 }
1367 
1368 // Handles an unloading request
// Prepares to leave VMX operation on this processor: restores GDTR/IDTR
// from the VMCS (the CPU stuffed 0xffff limits on VM-exit), hands the
// per-processor data back to the caller via *context, clears the flags to
// signal success, and stashes the resume address / stack / flags in
// volatile registers for the assembly stub that executes VMXOFF.
// NOTE(review): the VMCS field read on embedded line 1393 (the VM-exit
// instruction length feeding exit_instruction_length) was dropped by
// extraction — confirm against the original vmm.cpp. As in
// VmmpIndicateSuccessfulVmcall, the duplicate cf/zf assignments
// (embedded lines 1405-1406) are redundant but harmless.
1369 _Use_decl_annotations_ static void VmmpHandleVmCallTermination(
1370  GuestContext *guest_context, void *context) {
1371  // The processor sets ffff to limits of IDT and GDT when VM-exit occurred.
1372  // It is not correct value but fine to ignore since vmresume loads correct
1373  // values from VMCS. But here, we are going to skip vmresume and simply
1374  // return to where VMCALL is executed. It results in keeping those broken
1375  // values and ends up with bug check 109, so we should fix them manually.
1376  const auto gdt_limit = UtilVmRead(VmcsField::kGuestGdtrLimit);
1377  const auto gdt_base = UtilVmRead(VmcsField::kGuestGdtrBase);
1378  const auto idt_limit = UtilVmRead(VmcsField::kGuestIdtrLimit);
1379  const auto idt_base = UtilVmRead(VmcsField::kGuestIdtrBase);
1380  Gdtr gdtr = {static_cast<USHORT>(gdt_limit), gdt_base};
1381  Idtr idtr = {static_cast<USHORT>(idt_limit), idt_base};
1382  __lgdt(&gdtr);
1383  __lidt(&idtr);
1384 
1385  // Store an address of the management structure to the context parameter
1386  const auto result_ptr = reinterpret_cast<ProcessorData **>(context);
1387  *result_ptr = guest_context->stack->processor_data;
1388  HYPERPLATFORM_LOG_DEBUG_SAFE("Context at %p %p", context,
1389  guest_context->stack->processor_data);
1390 
1391  // Set rip to the next instruction of VMCALL
1392  const auto exit_instruction_length =
1394  const auto return_address = guest_context->ip + exit_instruction_length;
1395 
1396  // Since the flag register is overwritten after VMXOFF, we should manually
1397  // indicates that VMCALL was successful by clearing those flags.
1398  // See: CONVENTIONS
1399  guest_context->flag_reg.fields.cf = false;
1400  guest_context->flag_reg.fields.pf = false;
1401  guest_context->flag_reg.fields.af = false;
1402  guest_context->flag_reg.fields.zf = false;
1403  guest_context->flag_reg.fields.sf = false;
1404  guest_context->flag_reg.fields.of = false;
1405  guest_context->flag_reg.fields.cf = false;
1406  guest_context->flag_reg.fields.zf = false;
1407 
1408  // Set registers used after VMXOFF to recover the context. Volatile
1409  // registers must be used because those changes are reflected to the
1410  // guest's context after VMXOFF.
1411  guest_context->gp_regs->cx = return_address;
1412  guest_context->gp_regs->dx = guest_context->gp_regs->sp;
1413  guest_context->gp_regs->ax = guest_context->flag_reg.all;
// Tells the exit handler's caller to skip VMRESUME and run VMXOFF instead.
1414  guest_context->vm_continue = false;
1415 }
1416 
1417 // Returns guest's CPL
// Derives the guest's current privilege level from the DPL field of the
// guest SS access-rights VMCS field.
// NOTE(review): the declaration on embedded line 1419 (presumably
// `VmxRegmentDescriptorAccessRight ar = {` given the brace-close and
// `ar.fields.dpl` below) was dropped by extraction — confirm against the
// original vmm.cpp.
1418 /*_Use_decl_annotations_*/ static UCHAR VmmpGetGuestCpl() {
1420  static_cast<unsigned int>(UtilVmRead(VmcsField::kGuestSsArBytes))};
1421  return ar.fields.dpl;
1422 }
1423 
1424 // Injects interruption to a guest
// Builds a VM-entry interruption-information value (valid bit, type,
// vector, error-code-delivery flag) so the event is injected into the
// guest on the next VM-entry; writes the error code only when requested.
// NOTE(review): extraction dropped the declaration of `inject` (embedded
// line 1428) and the UtilVmWrite calls storing the interruption-info
// field (line 1433) and the error code (line 1436) — confirm against the
// original vmm.cpp.
1425 _Use_decl_annotations_ static void VmmpInjectInterruption(
1426  InterruptionType interruption_type, InterruptionVector vector,
1427  bool deliver_error_code, ULONG32 error_code) {
1429  inject.fields.valid = true;
1430  inject.fields.interruption_type = static_cast<ULONG32>(interruption_type);
1431  inject.fields.vector = static_cast<ULONG32>(vector);
1432  inject.fields.deliver_error_code = deliver_error_code;
1434 
1435  if (deliver_error_code) {
1437  }
1438 }
1439 
1440 // Returns a kernel CR3 value of the current process;
1441 /*_Use_decl_annotations_*/ static ULONG_PTR VmmpGetKernelCr3() {
1442  auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
1443  // Assume it is an user-mode CR3 when the lowest bit is set. If so, get CR3
1444  // from _KPROCESS::DirectoryTableBase.
1445  if (guest_cr3 & 1) {
1446  static const long kDirectoryTableBaseOffsetX64 = 0x28;
1447  static const long kDirectoryTableBaseOffsetX86 = 0x18;
1448  auto process = reinterpret_cast<PUCHAR>(PsGetCurrentProcess());
1449  if (IsX64()) {
1450  guest_cr3 =
1451  *reinterpret_cast<PULONG_PTR>(process + kDirectoryTableBaseOffsetX64);
1452  } else {
1453  guest_cr3 =
1454  *reinterpret_cast<PULONG_PTR>(process + kDirectoryTableBaseOffsetX86);
1455  }
1456  }
1457  return guest_cr3;
1458 }
1459 
1460 } // extern "C"
ULONG_PTR port_number
[16:31]
Definition: ia32_type.h:1382
A triple fault VM-exit occurred.
struct SegmentSelector::@7 fields
static void VmmpIndicateSuccessfulVmcall(_In_ GuestContext *guest_context)
static void VmmpHandleXsetbv(_Inout_ GuestContext *guest_context)
static void VmmpIndicateUnsuccessfulVmcall(_In_ GuestContext *guest_context)
ULONG32 valid
[31]
Definition: ia32_type.h:1641
HypercallNumber
Available command numbers for VMCALL.
Definition: util.h:68
struct EptData * ept_data
A pointer to EPT related data.
Definition: vmm.h:42
static DECLSPEC_NORETURN void VmmpHandleUnexpectedExit(_Inout_ GuestContext *guest_context)
static DECLSPEC_NORETURN void VmmpHandleTripleFault(_Inout_ GuestContext *guest_context)
ULONG_PTR all
Definition: ia32_type.h:145
ULONG_PTR access_type
[4:5]
Definition: ia32_type.h:1407
bool vm_continue
Definition: vmm.cpp:59
ULONG_PTR pf
[2] Parity flag
Definition: ia32_type.h:47
struct MovCrQualification::@39 fields
ULONG32 index_register_invalid
[22]
Definition: ia32_type.h:1279
VmxStatus UtilVmWrite(VmcsField field, ULONG_PTR field_value)
Definition: util.cpp:737
See: CONTROL REGISTERS.
Definition: ia32_type.h:122
ULONG32 interruption_type
[8:10]
Definition: ia32_type.h:1638
ULONG_PTR ax
Definition: ia32_type.h:100
struct IoInstQualification::@38 fields
static void VmmpHandleLdtrOrTrAccess(_Inout_ GuestContext *guest_context)
static ULONG g_vmmp_next_history_index[kVmmpNumberOfProcessors]
Definition: vmm.cpp:169
ULONG32 scalling
[0:1]
Definition: ia32_type.h:1271
InterruptionType
See: Format of the VM-Entry Interruption-Information Field.
Definition: ia32_type.h:1647
static void VmmpHandleVmExit(_Inout_ GuestContext *guest_context)
#define HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE()
Definition: performance.h:30
EptCommonEntry * EptGetEptPtEntry(EptData *ept_data, ULONG64 physical_address)
Definition: ept.cpp:702
ULONG_PTR gp_register
[8:11]
Definition: ia32_type.h:1365
ULONG32 vector
[0:7]
Definition: ia32_type.h:1637
KIRQL irql
Definition: vmm.cpp:58
FlagRegister flags
Definition: ia32_type.h:113
#define HYPERPLATFORM_COMMON_BUG_CHECK(hp_bug_check_code, param1, param2, param3)
Issues a bug check.
Definition: common.h:69
ULONG32 base_register
[23:26]
Definition: ia32_type.h:1335
Declares interfaces to performance measurement functions.
static void VmmpHandleVmCallTermination(_In_ GuestContext *guest_context, _Inout_ void *context)
constexpr bool IsX64()
Checks if a system is x64.
Definition: common.h:128
VmmInitialStack * stack
Definition: vmm.cpp:52
Represents a stack layout after PUSHAD.
Definition: ia32_type.h:92
#define HYPERPLATFORM_COMMON_DBG_BREAK()
Sets a break point that works only when a debugger is present.
Definition: common.h:55
ULONG32 not_used
[31] Always 0 (a.k.a. HypervisorPresent)
Definition: ia32_type.h:344
static void VmmpHandleVmx(_Inout_ GuestContext *guest_context)
ULONG_PTR all
Definition: ia32_type.h:192
ULONG32 instruction_identity
[28:29]
Definition: ia32_type.h:1282
static void VmmpHandleIoPort(_Inout_ GuestContext *guest_context)
ULONG_PTR af
[4] Borrow flag
Definition: ia32_type.h:49
EPT misconfiguration VM-exit occurred.
ULONG_PTR cf
[0] Carry flag
Definition: ia32_type.h:45
struct SegmentDescriptor::@8 fields
VmxExitReason reason
[0:15]
Definition: ia32_type.h:1222
struct VmExitInformation::@32 fields
static void VmmpHandleMsrReadAccess(_Inout_ GuestContext *guest_context)
ULONG_PTR tf
[8] Trap flag
Definition: ia32_type.h:53
See: CONTROL REGISTERS.
Definition: ia32_type.h:144
unsigned reserved1
[4:11] Always 1
Definition: ia32_type.h:179
FlagRegister flag_reg
Definition: vmm.cpp:55
See: Guest Register State.
Definition: ia32_type.h:1099
GpRegisters gp_regs
Definition: vmm.cpp:44
ULONG_PTR string_instruction
[4]
Definition: ia32_type.h:1378
unsigned bd
[13] Debug Register Access Detected
Definition: ia32_type.h:181
ULONG_PTR rep_prefixed
[5]
Definition: ia32_type.h:1379
static void VmmpHandleRdtscp(_Inout_ GuestContext *guest_context)
ULONG_PTR all
Definition: ia32_type.h:123
ULONG64 UtilVmRead64(VmcsField field)
Definition: util.cpp:718
unsigned de
[3] Debugging Extensions
Definition: ia32_type.h:150
Declares interfaces to EPT functions.
ULONG32 instruction_identity
[28:29]
Definition: ia32_type.h:1337
static void VmmpHandleInvalidateTlbEntry(_Inout_ GuestContext *guest_context)
ULONG_PTR di
Definition: ia32_type.h:93
VmxStatus UtilInvvpidIndividualAddress(USHORT vpid, void *address)
Definition: util.cpp:794
ULONG_PTR sp
Definition: ia32_type.h:96
GpRegisters gp_regs
Definition: vmm.cpp:69
VmxStatus UtilInveptGlobal()
Executes the INVEPT instruction and invalidates EPT entry cache.
Definition: util.cpp:787
ULONG32 base_register_invalid
[27]
Definition: ia32_type.h:1336
ULONG_PTR df
[10] Direction flag
Definition: ia32_type.h:55
VmxStatus UtilInvvpidSingleContextExceptGlobal(USHORT vpid)
Definition: util.cpp:820
bool __stdcall VmmVmExitHandler(_Inout_ VmmInitialStack *stack)
SharedProcessorData * shared_data
Shared data.
Definition: vmm.h:38
struct VmxRegmentDescriptorAccessRight::@26 fields
struct FlagRegister::@1 fields
unsigned b0
[0] Breakpoint Condition Detected 0
Definition: ia32_type.h:175
static const bool kVmmpEnableRecordVmExit
Definition: vmm.cpp:29
ULONG32 address_size
[7:9]
Definition: ia32_type.h:1329
ULONG_PTR all
Definition: ia32_type.h:43
unsigned gd
[13] General Detect Enable
Definition: ia32_type.h:207
#define HYPERPLATFORM_LOG_INFO_SAFE(format,...)
Definition: log.h:62
See: Page-Fault Error Code.
Definition: ia32_type.h:635
ULONG_PTR dx
Definition: ia32_type.h:98
GpRegisters * gp_regs
Definition: vmm.cpp:53
ULONG32 address_size
[7:9]
Definition: ia32_type.h:1273
Declares interfaces to VMM functions.
ULONG_PTR control_register
[0:3]
Definition: ia32_type.h:1406
void EptHandleEptViolation(EptData *ept_data)
Definition: ept.cpp:647
See: Exit Qualification for Control-Register Accesses.
Definition: ia32_type.h:1403
void UtilWriteMsr64(Msr msr, ULONG64 value)
Definition: util.cpp:782
struct VmEntryInterruptionInformationField::@49 fields
See: Exit Qualification for MOV DR.
Definition: ia32_type.h:1358
unsigned reserved3
[17:31] Always 1
Definition: ia32_type.h:185
An unspecified bug occurred.
Declares interfaces to assembly functions.
unsigned reserved2
[12] Always 0
Definition: ia32_type.h:180
ULONG_PTR bx
Definition: ia32_type.h:97
ULONG32 all
Definition: ia32_type.h:1635
See: Feature Information Returned in the ECX Register.
Definition: ia32_type.h:310
Msr
See: MODEL-SPECIFIC REGISTERS (MSRS)
Definition: ia32_type.h:576
static void VmmpInjectInterruption(_In_ InterruptionType interruption_type, _In_ InterruptionVector vector, _In_ bool deliver_error_code, _In_ ULONG32 error_code)
ULONG_PTR gp_register
[8:11]
Definition: ia32_type.h:1410
ULONG_PTR all
Definition: ia32_type.h:173
static void VmmpIoWrapper(_In_ bool to_memory, _In_ bool is_string, _In_ SIZE_T size_of_access, _In_ unsigned short port, _Inout_ void *address, _In_ unsigned long count)
See: SYSTEM FLAGS AND FIELDS IN THE EFLAGS REGISTER.
Definition: ia32_type.h:42
static void VmmpAdjustGuestInstructionPointer(_In_ GuestContext *guest_context)
unsigned reserved3
[14:15] Always 0
Definition: ia32_type.h:208
Declares interfaces to utility functions.
See: Format of the VM-Entry Interruption-Information Field.
Definition: ia32_type.h:1634
ULONG_PTR zf
[6] Zero flag
Definition: ia32_type.h:51
See: Format of Exit Reason in Basic VM-Exit Information.
Definition: ia32_type.h:1219
ULONG_PTR direction
[4]
Definition: ia32_type.h:1363
ULONG_PTR UtilReadMsr(Msr msr)
Definition: util.cpp:767
unsigned b3
[3] Breakpoint Condition Detected 3
Definition: ia32_type.h:178
See: Debug Control Register (DR7)
Definition: ia32_type.h:191
VmcsField
See: FIELD ENCODING IN VMCS.
Definition: ia32_type.h:648
ULONG32 scalling
[0:1]
Definition: ia32_type.h:1326
static void VmmpHandleRdtsc(_Inout_ GuestContext *guest_context)
See: Segment Selectors.
Definition: ia32_type.h:269
See: Format of the VM-Exit Interruption-Information Field.
Definition: ia32_type.h:1620
ULONG_PTR direction
[3]
Definition: ia32_type.h:1377
struct GdtrOrIdtrInstInformation::@35 fields
ULONG32 base_register_invalid
[27]
Definition: ia32_type.h:1281
See: Segment Descriptor.
Definition: ia32_type.h:281
static void VmmpHandleEptViolation(_Inout_ GuestContext *guest_context)
DECLSPEC_NORETURN void __stdcall VmmVmxFailureHandler(_Inout_ AllRegisters *all_regs)
ULONG_PTR reserved
Definition: vmm.cpp:45
ULONG_PTR debugl_register
[0:2]
Definition: ia32_type.h:1361
InterruptionVector
See: Format of the VM-Entry Interruption-Information Field.
Definition: ia32_type.h:1659
ProcessorData * processor_data
Definition: vmm.cpp:46
void UtilLoadPdptes(ULONG_PTR cr3_value)
Definition: util.cpp:828
ULONG_PTR sf
[7] Sign flag
Definition: ia32_type.h:52
static void VmmpHandleCpuid(_Inout_ GuestContext *guest_context)
unsigned b1
[1] Breakpoint Condition Detected 1
Definition: ia32_type.h:176
ULONG32 all
Definition: ia32_type.h:311
ULONG32 register1
[3:6]
Definition: ia32_type.h:1328
static void VmmpHandleGdtrOrIdtrAccess(_Inout_ GuestContext *guest_context)
static void VmmpHandleEptMisconfig(_Inout_ GuestContext *guest_context)
static const long kVmmpNumberOfRecords
Definition: vmm.cpp:32
static void VmmpHandleVmCall(_Inout_ GuestContext *guest_context)
ULONG32 index_register
[18:21]
Definition: ia32_type.h:1278
struct VmExitInterruptionInformationField::@48 fields
static void VmmpHandleInvalidateInternalCaches(_Inout_ GuestContext *guest_context)
#define HYPERPLATFORM_LOG_DEBUG_SAFE(format,...)
Buffers a message as respective severity.
Definition: log.h:57
See: Exit Qualification for I/O Instructions.
Definition: ia32_type.h:1373
ULONG32 segment_register
[15:17]
Definition: ia32_type.h:1332
unsigned b2
[2] Breakpoint Condition Detected 2
Definition: ia32_type.h:177
ULONG32 index_register
[18:21]
Definition: ia32_type.h:1333
static ULONG_PTR VmmpGetKernelCr3()
Definition: vmm.cpp:1441
ULONG_PTR si
Definition: ia32_type.h:94
VmxStatus UtilInvvpidAllContext()
Executes the INVVPID instruction (type 2)
Definition: util.cpp:812
ULONG32 segment_register
[15:17]
Definition: ia32_type.h:1277
ULONG_PTR UtilVmRead(VmcsField field)
Definition: util.cpp:705
bool UtilIsX86Pae()
Checks if the system is a PAE-enabled x86 system.
Definition: util.cpp:512
static const long kVmmpNumberOfProcessors
Definition: vmm.cpp:35
static const ULONG32 kHyperVCpuidInterface
A majority of modern hypervisors expose their signatures through CPUID with this CPUID function code ...
Definition: ia32_type.h:29
static void VmmpHandleMsrAccess(_Inout_ GuestContext *guest_context, _In_ bool read_access)
struct MovDrQualification::@37 fields
MovDrDirection
See: Exit Qualification for MOV DR.
Definition: ia32_type.h:1352
ULONG32 deliver_error_code
[11]
Definition: ia32_type.h:1639
void __stdcall AsmWriteCR2(_In_ ULONG_PTR cr2_value)
Writes to CR2.
See: Format of the VM-Exit Instruction-Information Field as Used for LLDT, LTR, SLDT, and STR.
Definition: ia32_type.h:1323
static void VmmpHandleDrAccess(_Inout_ GuestContext *guest_context)
struct CpuFeaturesEcx::@9 fields
struct Dr7::@5 fields
Sends ping to the VMM.
See: Debug Status Register (DR6)
Definition: ia32_type.h:172
ULONG_PTR bp
Definition: ia32_type.h:95
void __lgdt(_In_ void *gdtr)
Reads SGDT.
Definition: asm.h:165
ULONG32 base_register
[23:26]
Definition: ia32_type.h:1280
Declares and implements common things across the project.
static void VmmpHandleMonitorTrap(_Inout_ GuestContext *guest_context)
static void VmmpHandleCrAccess(_Inout_ GuestContext *guest_context)
VmExitInformation exit_reason
Definition: vmm.cpp:71
ULONG_PTR ip
Definition: vmm.cpp:56
ULONG64 UtilReadMsr64(Msr msr)
Definition: util.cpp:772
See: MEMORY-MANAGEMENT REGISTERS.
Definition: ia32_type.h:223
ULONG_PTR exit_qualification
Definition: vmm.cpp:72
static VmExitHistory g_vmmp_vm_exit_history[kVmmpNumberOfProcessors][kVmmpNumberOfRecords]
Definition: vmm.cpp:171
ULONG_PTR instruction_info
Definition: vmm.cpp:73
Declares interfaces to logging functions.
static void VmmpDumpGuestState()
Definition: vmm.cpp:1258
ULONG_PTR size_of_access
[0:2]
Definition: ia32_type.h:1376
ULONG32 register_access
[10]
Definition: ia32_type.h:1330
static UCHAR VmmpGetGuestCpl()
Definition: vmm.cpp:1418
void __stdcall AsmInvalidateInternalCaches()
Invalidates internal caches.
unsigned reserved1
[10] Always 1
Definition: ia32_type.h:204
ULONG_PTR cx
Definition: ia32_type.h:99
static ULONG_PTR * VmmpSelectRegister(_In_ ULONG index, _In_ GuestContext *guest_context)
constexpr bool UtilIsInBounds(_In_ const T &value, _In_ const T &min, _In_ const T &max)
Tests if value is in between min and max.
Definition: util.h:300
An unexpected VM-exit occurred.
static void VmmpHandleMsrWriteAccess(_Inout_ GuestContext *guest_context)
ULONG_PTR of
[11] Overflow flag
Definition: ia32_type.h:56
See: Format of the VM-Exit Instruction-Information Field as Used for LIDT, LGDT, SIDT, or SGDT.
Definition: ia32_type.h:1268
Represents VMM related data associated with each processor.
Definition: vmm.h:37
ULONG_PTR ip
Definition: vmm.cpp:70
struct Dr6::@4 fields
Represents a stack layout after a sequence of PUSHFx, PUSHAx.
Definition: ia32_type.h:111
static void VmmpHandleException(_Inout_ GuestContext *guest_context)
struct LdtrOrTrInstInformation::@36 fields
ULONG32 index_register_invalid
[22]
Definition: ia32_type.h:1334
ULONG_PTR cr8
Definition: vmm.cpp:57
unsigned short index
Definition: ia32_type.h:274
struct Cr4::@3 fields
unsigned reserved2
[12] Always 0
Definition: ia32_type.h:206
VmxStatus UtilVmWrite64(VmcsField field, ULONG64 field_value)
Definition: util.cpp:744