; Copyright (c) 2015-2017, Satoshi Tanda. All rights reserved.
; Use of this source code is governed by an MIT-style license that can be
; found in the LICENSE file.

; This module implements all assembler code.

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; References to C functions

EXTERN VmmVmExitHandler : PROC
EXTERN VmmVmxFailureHandler : PROC
EXTERN UtilDumpGpRegisters : PROC

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
VMX_ERROR_WITH_STATUS    EQU 1
VMX_ERROR_WITHOUT_STATUS EQU 2
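; VMX instructions report failure through rflags: VMfailValid sets ZF and
; leaves an error number in the VM-instruction error field of the current
; VMCS, while VMfailInvalid sets CF with no error number available (see the
; VMX instruction reference in the Intel SDM).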
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; Saves all general purpose registers to the stack
    push -1                 ; dummy for rsp
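; Assuming the macro pushes rax, rcx, rdx, rbx, the rsp dummy, rbp, rsi, rdi,
; then r8 through r15 in that order (an assumption here; only the rsp dummy is
; shown), the saved area can be read back through a C struct like this sketch,
; lowest address (the last push) first:
;
;   typedef struct _GP_REGISTERS {
;     ULONG_PTR r15, r14, r13, r12, r11, r10, r9, r8;
;     ULONG_PTR rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax;
;   } GP_REGISTERS;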
; Loads all general purpose registers from the stack
    add rsp, 8              ; dummy for rsp
; Dumps all general purpose registers and the flag register.
ASM_DUMP_REGISTERS MACRO
    pushfq
    PUSHAQ                      ; -8 * 16
    mov rcx, rsp                ; guest_context
    mov rdx, rsp
    add rdx, 8*17               ; stack_pointer before the 17 pushes above
    sub rsp, 28h                ; shadow space (20h) + 8h for 16-byte alignment
    call UtilDumpGpRegisters    ; UtilDumpGpRegisters(guest_context, stack_pointer);
    add rsp, 28h
    POPAQ
    popfq
ENDM
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; bool __stdcall AsmInitializeVm(
;     _In_ void (*vm_initialization_routine)(_In_ ULONG_PTR, _In_ ULONG_PTR,
;                                            _In_opt_ void *),
;     _In_opt_ void *context);
AsmInitializeVm PROC
    ; RSP is not 16-byte aligned when this function is called, but the
    ; following odd number of pushes (17 in total) makes RSP 16-byte aligned.
    pushfq
    PUSHAQ                  ; -8 * 16

    call rax                ; vm_initialization_routine(rsp, asmResumeVm, context)

    POPAQ
    popfq
    xor rax, rax            ; return false
    ret

    ; This is where the virtualized guest starts to execute after a successful
    ; vmlaunch.
asmResumeVm:
    nop                     ; keep this nop for ease of debugging

    sub rsp, 8              ; align RSP
    ASM_DUMP_REGISTERS
    add rsp, 8              ; restore RSP

    xor rax, rax
    inc rax                 ; return true
    ret
AsmInitializeVm ENDP
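; A C-side sketch of how this routine is meant to be driven (the callback name
; VmpInitialize and the context variable are hypothetical; only the
; AsmInitializeVm signature above comes from this file):
;
;   static void VmpInitialize(ULONG_PTR guest_stack_pointer,
;                             ULONG_PTR guest_instruction_pointer,
;                             void *context) {
;     // set up a VMCS whose guest rsp/rip are the given values, then
;     // vmlaunch; on success, execution reappears at asmResumeVm
;   }
;
;   bool launched = AsmInitializeVm(VmpInitialize, context);
;   // true:  resumed at asmResumeVm, i.e., now running as a guest
;   // false: vm_initialization_routine returned without a vmlaunch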
; void __stdcall AsmVmmEntryPoint();
AsmVmmEntryPoint PROC
    ; No need to save the flag register since it is restored from the VMCS at
    ; the time of vmresume.
    PUSHAQ                  ; -8 * 16
    mov rcx, rsp            ; guest_context

    ; save volatile XMM registers
    sub rsp, 60h
    movaps xmmword ptr [rsp +  0h], xmm0
    movaps xmmword ptr [rsp + 10h], xmm1
    movaps xmmword ptr [rsp + 20h], xmm2
    movaps xmmword ptr [rsp + 30h], xmm3
    movaps xmmword ptr [rsp + 40h], xmm4
    movaps xmmword ptr [rsp + 50h], xmm5
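    ; VmmVmExitHandler is plain C code, and the Windows x64 calling convention
    ; treats xmm0-xmm5 as volatile; the guest's values would be clobbered by
    ; the call, so they are saved above and restored after the call returns
    ; (xmm6-xmm15 are nonvolatile and need no such handling).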
    call VmmVmExitHandler   ; bool vm_continue = VmmVmExitHandler(guest_context);

    ; restore XMM registers
    movaps xmm0, xmmword ptr [rsp +  0h]
    movaps xmm1, xmmword ptr [rsp + 10h]
    movaps xmm2, xmmword ptr [rsp + 20h]
    movaps xmm3, xmmword ptr [rsp + 30h]
    movaps xmm4, xmmword ptr [rsp + 40h]
    movaps xmm5, xmmword ptr [rsp + 50h]
    add rsp, 60h

    test al, al             ; vm_continue?
    jz exitVm               ; if (!vm_continue) jmp exitVm

    POPAQ                   ; restore guest GP registers
    vmresume                ; continue the guest
    jmp vmxError            ; reached only when vmresume failed
exitVm:
    ; Executes vmxoff and ends virtualization
    ;   rax = Guest's rflags
    ;   rdx = Guest's rsp
    ;   rcx = Guest's rip for the next instruction
    vmxoff
    jz vmxError             ; if (ZF) jmp
    jc vmxError             ; if (CF) jmp
    push rax
    popfq                   ; rflags <= GuestFlags
    mov rsp, rdx            ; rsp <= GuestRsp
    push rcx
    ret                     ; jmp AddressToReturn
vmxError:
    ; Diagnose a critical error
    pushfq
    PUSHAQ                      ; -8 * 16; all_regs points at the saved registers
    mov rcx, rsp                ; all_regs
    sub rsp, 28h                ; shadow space (20h) + 8h for 16-byte alignment
    call VmmVmxFailureHandler   ; VmmVmxFailureHandler(all_regs);
AsmVmmEntryPoint ENDP
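; In C-like pseudocode, AsmVmmEntryPoint above amounts to the following
; (descriptive only; names match the comments in the code):
;
;   save guest GP registers and volatile XMM registers;
;   bool vm_continue = VmmVmExitHandler(guest_context);
;   if (vm_continue) { restore guest state; vmresume; }
;   else { vmxoff; rflags = guest_rflags; rsp = guest_rsp; goto guest_rip; }
;   // a failed vmresume or vmxoff lands in vmxError -> VmmVmxFailureHandler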
; unsigned char __stdcall AsmVmxCall(_In_ ULONG_PTR hypercall_number,
;                                    _In_opt_ void *context);
AsmVmxCall PROC
    vmcall                  ; vmcall(hypercall_number, context)
    jz errorWithCode        ; if (ZF) jmp
    jc errorWithoutCode     ; if (CF) jmp
    xor rax, rax            ; return VMX_OK
    ret

errorWithoutCode:
    mov rax, VMX_ERROR_WITHOUT_STATUS
    ret

errorWithCode:
    mov rax, VMX_ERROR_WITH_STATUS
    ret
AsmVmxCall ENDP
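; A C-side sketch of issuing a hypercall and checking the result (the
; hypercall number is hypothetical; AsmVmxCall and the VMX_ERROR_* values are
; from this file):
;
;   unsigned char status = AsmVmxCall(kSomeHypercallNumber, NULL);
;   if (status == 1) {        // VMX_ERROR_WITH_STATUS
;     // an error number is available in the VM-instruction error field
;   } else if (status == 2) { // VMX_ERROR_WITHOUT_STATUS
;     // failed with no further detail
;   }                         // 0 == VMX_OK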
; void __stdcall AsmWriteGDT(_In_ const GDTR *gdtr);
; void __stdcall AsmReadGDT(_Out_ GDTR *gdtr);
; void __stdcall AsmWriteLDTR(_In_ USHORT local_segment_selector);
; USHORT __stdcall AsmReadLDTR();
; void __stdcall AsmWriteTR(_In_ USHORT task_register);
; USHORT __stdcall AsmReadTR();
; void __stdcall AsmWriteES(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadES();
; void __stdcall AsmWriteCS(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadCS();
; void __stdcall AsmWriteSS(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadSS();
; void __stdcall AsmWriteDS(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadDS();
; void __stdcall AsmWriteFS(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadFS();
; void __stdcall AsmWriteGS(_In_ USHORT segment_selector);
; USHORT __stdcall AsmReadGS();
; ULONG_PTR __stdcall AsmLoadAccessRightsByte(_In_ ULONG_PTR segment_selector);
AsmLoadAccessRightsByte PROC
    lar rax, rcx
    ret
AsmLoadAccessRightsByte ENDP
; void __stdcall AsmInvalidateInternalCaches();
AsmInvalidateInternalCaches PROC
    invd
    ret
AsmInvalidateInternalCaches ENDP
; void __stdcall AsmWriteCR2(_In_ ULONG_PTR cr2_value);
; unsigned char __stdcall AsmInvept(
;     _In_ InvEptType invept_type,
;     _In_ const InvEptDescriptor *invept_descriptor);
AsmInvept PROC
    ; invept ecx, oword ptr [rdx]
    ; (hand-assembled: 66 0F 38 80 /r with a ModRM of 0Ah = ecx, [rdx], for
    ; assemblers that do not know the instruction)
    db 66h, 0fh, 38h, 80h, 0ah
    jz errorWithCode        ; if (ZF) jmp
    jc errorWithoutCode     ; if (CF) jmp
    xor rax, rax            ; return VMX_OK
    ret

errorWithoutCode:
    mov rax, VMX_ERROR_WITHOUT_STATUS
    ret

errorWithCode:
    mov rax, VMX_ERROR_WITH_STATUS
    ret
AsmInvept ENDP
; unsigned char __stdcall AsmInvvpid(
;     _In_ InvVpidType invvpid_type,
;     _In_ const InvVpidDescriptor *invvpid_descriptor);
AsmInvvpid PROC
    ; invvpid ecx, oword ptr [rdx]
    ; (hand-assembled: 66 0F 38 81 /r with a ModRM of 0Ah = ecx, [rdx])
    db 66h, 0fh, 38h, 81h, 0ah
    jz errorWithCode        ; if (ZF) jmp
    jc errorWithoutCode     ; if (CF) jmp
    xor rax, rax            ; return VMX_OK
    ret

errorWithoutCode:
    mov rax, VMX_ERROR_WITHOUT_STATUS
    ret

errorWithCode:
    mov rax, VMX_ERROR_WITH_STATUS
    ret
AsmInvvpid ENDP
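; A C-side sketch of a global EPT invalidation (the type value 2 is
; all-context per the Intel SDM; the InvEptDescriptor name follows the
; prototype above, and its layout is otherwise an assumption):
;
;   InvEptDescriptor desc = {0};   // reserved fields must be zero
;   unsigned char status = AsmInvept(2, &desc);
;   // AsmInvvpid is used the same way with an InvVpidDescriptor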
PURGE ASM_DUMP_REGISTERS

END