From b7f0e7d3351a3eb9089abb55d5a2baeef6244493 Mon Sep 17 00:00:00 2001
From: Nitr0-G <120374383+Nitr0-G@users.noreply.github.com>
Date: Sat, 12 Aug 2023 07:14:11 +0300
Subject: [PATCH 1/3] Full support of SSE and AVX were added in VMM.asm

---
 Kernel-Bridge/API/VMM.asm | 94 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 82 insertions(+), 12 deletions(-)

diff --git a/Kernel-Bridge/API/VMM.asm b/Kernel-Bridge/API/VMM.asm
index d675389..99d4a64 100644
--- a/Kernel-Bridge/API/VMM.asm
+++ b/Kernel-Bridge/API/VMM.asm
@@ -45,8 +45,10 @@ __kb_vmcall ENDP
 GPR_CONTEXT_ENTRIES equ 15 ; rax, rbx, rcx, rdx, rsi, rdi, rbp, r8..r15
 GPR_CONTEXT_SIZE equ GPR_CONTEXT_ENTRIES * sizeof(QWORD)
 
-XMM_CONTEXT_ENTRIES equ 6 ; xmm0..xmm5
+XMM_CONTEXT_ENTRIES equ 16 ; xmm0..xmm15
 XMM_CONTEXT_SIZE equ XMM_CONTEXT_ENTRIES * sizeof(OWORD)
+YMM_CONTEXT_ENTRIES equ 16 ; ymm0..ymm15
+YMM_CONTEXT_SIZE equ YMM_CONTEXT_ENTRIES * sizeof(YMMWORD)
 
 CPUID_VMM_SHUTDOWN equ 01EE7C0DEh
 
@@ -98,6 +100,16 @@ PUSHAXMM MACRO
     movaps [rsp + 3 * sizeof(OWORD)], xmm3
     movaps [rsp + 4 * sizeof(OWORD)], xmm4
     movaps [rsp + 5 * sizeof(OWORD)], xmm5
+    movaps [rsp + 6 * sizeof(OWORD)], xmm6
+    movaps [rsp + 7 * sizeof(OWORD)], xmm7
+    movaps [rsp + 8 * sizeof(OWORD)], xmm8
+    movaps [rsp + 9 * sizeof(OWORD)], xmm9
+    movaps [rsp + 10 * sizeof(OWORD)], xmm10
+    movaps [rsp + 11 * sizeof(OWORD)], xmm11
+    movaps [rsp + 12 * sizeof(OWORD)], xmm12
+    movaps [rsp + 13 * sizeof(OWORD)], xmm13
+    movaps [rsp + 14 * sizeof(OWORD)], xmm14
+    movaps [rsp + 15 * sizeof(OWORD)], xmm15
 ENDM
 
 POPAXMM MACRO
@@ -107,9 +119,71 @@ POPAXMM MACRO
     movaps xmm3, [rsp + 3 * sizeof(OWORD)]
     movaps xmm4, [rsp + 4 * sizeof(OWORD)]
     movaps xmm5, [rsp + 5 * sizeof(OWORD)]
+    movaps xmm6, [rsp + 6 * sizeof(OWORD)]
+    movaps xmm7, [rsp + 7 * sizeof(OWORD)]
+    movaps xmm8, [rsp + 8 * sizeof(OWORD)]
+    movaps xmm9, [rsp + 9 * sizeof(OWORD)]
+    movaps xmm10, [rsp + 10 * sizeof(OWORD)]
+    movaps xmm11, [rsp + 11 * sizeof(OWORD)]
+    movaps xmm12, [rsp + 12 * sizeof(OWORD)]
+    movaps xmm13, [rsp + 13 * sizeof(OWORD)]
+    movaps xmm14, [rsp + 14 * sizeof(OWORD)]
+    movaps xmm15, [rsp + 15 * sizeof(OWORD)]
     add rsp, XMM_CONTEXT_SIZE
 ENDM
 
+PUSHAYMM MACRO
+    sub rsp, YMM_CONTEXT_SIZE
+    vmovdqu ymmword ptr [rsp + 0 * sizeof(YMMWORD)], ymm0
+    vmovdqu ymmword ptr [rsp + 1 * sizeof(YMMWORD)], ymm1
+    vmovdqu ymmword ptr [rsp + 2 * sizeof(YMMWORD)], ymm2
+    vmovdqu ymmword ptr [rsp + 3 * sizeof(YMMWORD)], ymm3
+    vmovdqu ymmword ptr [rsp + 4 * sizeof(YMMWORD)], ymm4
+    vmovdqu ymmword ptr [rsp + 5 * sizeof(YMMWORD)], ymm5
+    vmovdqu ymmword ptr [rsp + 6 * sizeof(YMMWORD)], ymm6
+    vmovdqu ymmword ptr [rsp + 7 * sizeof(YMMWORD)], ymm7
+    vmovdqu ymmword ptr [rsp + 8 * sizeof(YMMWORD)], ymm8
+    vmovdqu ymmword ptr [rsp + 9 * sizeof(YMMWORD)], ymm9
+    vmovdqu ymmword ptr [rsp + 10 * sizeof(YMMWORD)], ymm10
+    vmovdqu ymmword ptr [rsp + 11 * sizeof(YMMWORD)], ymm11
+    vmovdqu ymmword ptr [rsp + 12 * sizeof(YMMWORD)], ymm12
+    vmovdqu ymmword ptr [rsp + 13 * sizeof(YMMWORD)], ymm13
+    vmovdqu ymmword ptr [rsp + 14 * sizeof(YMMWORD)], ymm14
+    vmovdqu ymmword ptr [rsp + 15 * sizeof(YMMWORD)], ymm15
+ENDM
+
+POPAYMM MACRO
+    vmovdqu ymm0, ymmword ptr [rsp + 0 * sizeof(YMMWORD)]
+    vmovdqu ymm1, ymmword ptr [rsp + 1 * sizeof(YMMWORD)]
+    vmovdqu ymm2, ymmword ptr [rsp + 2 * sizeof(YMMWORD)]
+    vmovdqu ymm3, ymmword ptr [rsp + 3 * sizeof(YMMWORD)]
+    vmovdqu ymm4, ymmword ptr [rsp + 4 * sizeof(YMMWORD)]
+    vmovdqu ymm5, ymmword ptr [rsp + 5 * sizeof(YMMWORD)]
+    vmovdqu ymm6, ymmword ptr [rsp + 6 * sizeof(YMMWORD)]
+    vmovdqu ymm7, ymmword ptr [rsp + 7 * sizeof(YMMWORD)]
+    vmovdqu ymm8, ymmword ptr [rsp + 8 * sizeof(YMMWORD)]
+    vmovdqu ymm9, ymmword ptr [rsp + 9 * sizeof(YMMWORD)]
+    vmovdqu ymm10, ymmword ptr [rsp + 10 * sizeof(YMMWORD)]
+    vmovdqu ymm11, ymmword ptr [rsp + 11 * sizeof(YMMWORD)]
+    vmovdqu ymm12, ymmword ptr [rsp + 12 * sizeof(YMMWORD)]
+    vmovdqu ymm13, ymmword ptr [rsp + 13 * sizeof(YMMWORD)]
+    vmovdqu ymm14, ymmword ptr [rsp + 14 * sizeof(YMMWORD)]
+    vmovdqu ymm15,
ymmword ptr [rsp + 15 * sizeof(YMMWORD)]
+    add rsp, YMM_CONTEXT_SIZE
+ENDM
+
+MULTIPUSH MACRO
+    PUSHAQ
+    mov rcx, [rsp + GPR_CONTEXT_SIZE + 16] ; RCX -> PRIVATE_VM_DATA* Private
+    mov rdx, rsp ; RDX -> Guest context
+
+    PUSHAXMM
+    mov r8, rsp
+
+    PUSHAYMM
+    mov r9, rsp
+ENDM
+
 PROLOGUE MACRO
     push rbp
     mov rbp, rsp
@@ -157,14 +231,12 @@ VmmLoop:
     vmsave rax ; RAX was restored to host's state (RAX -> GuestVmcbPa)
 
     ; On #VMEXIT we have the guest context, so save it to the stack:
-    PUSHAQ
-    mov rcx, [rsp + GPR_CONTEXT_SIZE + 16] ; RCX -> PRIVATE_VM_DATA* Private
-    mov rdx, rsp ; RDX -> Guest context
+    MULTIPUSH
 
-    PUSHAXMM
     sub rsp, 32 ; Homing space for the x64 call convention
-    call SvmVmexitHandler ; VMM_STATUS SvmVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context)
+    call SvmVmexitHandler ; VMM_STATUS SvmVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context, GUEST_SSE_CONTEXT* SSeContext, GUEST_AVX_CONTEXT* AvxContext)
     add rsp, 32
+    POPAYMM
     POPAXMM
 
     test al, al ; if (!SvmVmexitHandler(...)) break;
@@ -203,14 +275,12 @@ __invvpid PROC PUBLIC
 __invvpid ENDP
 
 VmxVmmRun PROC PUBLIC
-    PUSHAQ
-    mov rcx, [rsp + GPR_CONTEXT_SIZE + 16]
-    mov rdx, rsp
+    MULTIPUSH
 
-    PUSHAXMM
     sub rsp, 32 ; Homing space for the x64 call convention
-    call VmxVmexitHandler ; VMM_STATUS VmxVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context)
+    call VmxVmexitHandler ; VMM_STATUS VmxVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context, GUEST_SSE_CONTEXT* SSeContext, GUEST_AVX_CONTEXT* AvxContext)
     add rsp, 32
+    POPAYMM
     POPAXMM
 
     test al, al ; if (!SvmVmexitHandler(...)) break;
@@ -233,4 +303,4 @@ VmmExit:
     jmp rbx
 VmxVmmRun ENDP
 
-END
\ No newline at end of file
+END

From d393778a50be24d78e4613c2111581b2384b8f2a Mon Sep 17 00:00:00 2001
From: Nitr0-G <120374383+Nitr0-G@users.noreply.github.com>
Date: Sat, 12 Aug 2023 07:57:33 +0300
Subject: [PATCH 2/3] Full support of SSE and AVX were added in Hypervisor.cpp

---
 Kernel-Bridge/API/Hypervisor.cpp | 102
+++++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 19 deletions(-) diff --git a/Kernel-Bridge/API/Hypervisor.cpp b/Kernel-Bridge/API/Hypervisor.cpp index a216b4a..f047118 100644 --- a/Kernel-Bridge/API/Hypervisor.cpp +++ b/Kernel-Bridge/API/Hypervisor.cpp @@ -49,23 +49,67 @@ enum class VMM_STATUS : bool VMM_CONTINUE = true // Continue execution in the virtualized environment }; +typedef uint64_t uint128_t[2]; +typedef uint64_t uint256_t[4]; + struct GUEST_CONTEXT { - unsigned long long Rax; - unsigned long long Rbx; - unsigned long long Rcx; - unsigned long long Rdx; - unsigned long long Rsi; - unsigned long long Rdi; - unsigned long long Rbp; - unsigned long long R8; - unsigned long long R9; - unsigned long long R10; - unsigned long long R11; - unsigned long long R12; - unsigned long long R13; - unsigned long long R14; - unsigned long long R15; + + uint64_t Rax; + uint64_t Rbx; + uint64_t Rcx; + uint64_t Rdx; + uint64_t Rsi; + uint64_t Rdi; + uint64_t Rbp; + uint64_t R8; + uint64_t R9; + uint64_t R10; + uint64_t R11; + uint64_t R12; + uint64_t R13; + uint64_t R14; + uint64_t R15; +}; + +struct GUEST_SSE_CONTEXT +{ + uint128_t xmm0; + uint128_t xmm1; + uint128_t xmm2; + uint128_t xmm3; + uint128_t xmm4; + uint128_t xmm5; + uint128_t xmm6; + uint128_t xmm7; + uint128_t xmm8; + uint128_t xmm9; + uint128_t xmm10; + uint128_t xmm11; + uint128_t xmm12; + uint128_t xmm13; + uint128_t xmm14; + uint128_t xmm15; +}; + +struct GUEST_AVX_CONTEXT +{ + uint256_t ymm0; + uint256_t ymm1; + uint256_t ymm2; + uint256_t ymm3; + uint256_t ymm4; + uint256_t ymm5; + uint256_t ymm6; + uint256_t ymm7; + uint256_t ymm8; + uint256_t ymm9; + uint256_t ymm10; + uint256_t ymm11; + uint256_t ymm12; + uint256_t ymm13; + uint256_t ymm14; + uint256_t ymm15; }; static volatile bool g_IsVirtualized = false; @@ -371,7 +415,7 @@ namespace SVM Guest->ControlArea.EventInjection = Event.Value; } - extern "C" VMM_STATUS SvmVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context) + 
extern "C" VMM_STATUS SvmVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context, GUEST_SSE_CONTEXT* SSeContext, GUEST_AVX_CONTEXT* AvxContext) { // Load the host state: __svm_vmload(reinterpret_cast(Private->VmmStack.Layout.InitialStack.HostVmcbPa)); @@ -388,7 +432,7 @@ namespace SVM int Function = static_cast(Context->Rax); int SubLeaf = static_cast(Context->Rcx); __cpuidex(Regs.Raw, Function, SubLeaf); - + switch (Function) { case CPUID_VMM_SHUTDOWN: { @@ -593,6 +637,16 @@ namespace SVM // Check the 'AuthenticAMD' vendor name: __cpuid(Regs.Raw, CPUID::Generic::CPUID_MAXIMUM_FUNCTION_NUMBER_AND_VENDOR_ID); if (Regs.Regs.Ebx != 'htuA' || Regs.Regs.Edx != 'itne' || Regs.Regs.Ecx != 'DMAc') return false; + + // Check for SSE (Streaming SIMD Extensions) support + constexpr unsigned int CPUID_FN0000001_EDX_SSE_SET = 1 << 25; + __cpuid(Regs.Raw, CPUID::AMD::CPUID_FEATURE_INFORMATION); + if ((Regs.Regs.Edx & CPUID_FN0000001_EDX_SSE_SET) == 0) return false; + + // Check for AVX (Advanced Vector Extensions) support + constexpr unsigned int CPUID_FN0000001_ECX_AVX_SET = 1 << 28; + __cpuid(Regs.Raw, CPUID::AMD::CPUID_FEATURE_INFORMATION); + if ((Regs.Regs.Ecx & CPUID_FN0000001_ECX_AVX_SET) == 0) return false; // Check the AMD SVM (AMD-V) support: constexpr unsigned int CPUID_FN80000001_ECX_SVM = 1 << 2; @@ -2509,7 +2563,7 @@ namespace VMX _IRQL_requires_same_ _IRQL_requires_min_(HIGH_LEVEL) - extern "C" VMM_STATUS VmxVmexitHandler(PRIVATE_VM_DATA* Private, __inout GUEST_CONTEXT* Context) + extern "C" VMM_STATUS VmxVmexitHandler(PRIVATE_VM_DATA* Private, GUEST_CONTEXT* Context, GUEST_SSE_CONTEXT* SSeContext, GUEST_AVX_CONTEXT* AvxContext) { /* Interrupts are locked */ @@ -2714,6 +2768,16 @@ namespace VMX __cpuid(Regs.Raw, CPUID::Generic::CPUID_MAXIMUM_FUNCTION_NUMBER_AND_VENDOR_ID); if (Regs.Regs.Ebx != 'uneG' || Regs.Regs.Edx != 'Ieni' || Regs.Regs.Ecx != 'letn') return false; + // Check for SSE (Streaming SIMD Extensions) support + constexpr unsigned int 
CPUID_FN0000001_EDX_SSE_SET = 1 << 25;
+            __cpuid(Regs.Raw, CPUID::Intel::CPUID_FEATURE_INFORMATION);
+            if ((Regs.Regs.Edx & CPUID_FN0000001_EDX_SSE_SET) == 0) return false;
+
+            // Check for AVX (Advanced Vector Extensions) support
+            constexpr unsigned int CPUID_FN0000001_ECX_AVX_SET = 1 << 28;
+            __cpuid(Regs.Raw, CPUID::Intel::CPUID_FEATURE_INFORMATION);
+            if ((Regs.Regs.Ecx & CPUID_FN0000001_ECX_AVX_SET) == 0) return false;
+
             // Support by processor:
             __cpuid(Regs.Raw, CPUID::Intel::CPUID_FEATURE_INFORMATION);
             if (!reinterpret_cast(&Regs)->Intel.VMX) return false;
@@ -2833,4 +2897,4 @@ namespace Hypervisor
         return false;
 #endif
     }
-}
\ No newline at end of file
+}

From cd5269127276476c39cf0abbf4685169306d41a7 Mon Sep 17 00:00:00 2001
From: Nitr0-G <120374383+Nitr0-G@users.noreply.github.com>
Date: Sat, 12 Aug 2023 07:58:35 +0300
Subject: [PATCH 3/3] Context entries fix in VMM.asm

---
 Kernel-Bridge/API/VMM.asm | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/Kernel-Bridge/API/VMM.asm b/Kernel-Bridge/API/VMM.asm
index 99d4a64..d481332 100644
--- a/Kernel-Bridge/API/VMM.asm
+++ b/Kernel-Bridge/API/VMM.asm
@@ -45,10 +45,9 @@ __kb_vmcall ENDP
 GPR_CONTEXT_ENTRIES equ 15 ; rax, rbx, rcx, rdx, rsi, rdi, rbp, r8..r15
 GPR_CONTEXT_SIZE equ GPR_CONTEXT_ENTRIES * sizeof(QWORD)
 
-XMM_CONTEXT_ENTRIES equ 16 ; xmm0..xmm15
-XMM_CONTEXT_SIZE equ XMM_CONTEXT_ENTRIES * sizeof(OWORD)
-YMM_CONTEXT_ENTRIES equ 16 ; ymm0..ymm15
-YMM_CONTEXT_SIZE equ YMM_CONTEXT_ENTRIES * sizeof(YMMWORD)
+XMM_YMM_CONTEXT_ENTRIES equ 16 ; xmm0..xmm15 ymm0..ymm15
+XMM_CONTEXT_SIZE equ XMM_YMM_CONTEXT_ENTRIES * sizeof(OWORD)
+YMM_CONTEXT_SIZE equ XMM_YMM_CONTEXT_ENTRIES * sizeof(YMMWORD)
 
 CPUID_VMM_SHUTDOWN equ 01EE7C0DEh