From fb0be3b6ab38002f0563b22bba9f688a1b2652fc Mon Sep 17 00:00:00 2001 From: Petr Benes Date: Tue, 29 Oct 2019 16:27:22 +0100 Subject: [PATCH] add vcpu_t::user_data() Holds per-VCPU data assigned by the user. --- src/hvpp/hvpp/hvpp.cpp | 19 +++++++++++++++++++ src/hvpp/hvpp/hvpp.h | 13 +++++++++++++ src/hvpp/hvpp/vcpu.cpp | 12 ++++++++++++ src/hvpp/hvpp/vcpu.h | 5 +++++ src/hvppdrv_c/vmexit_custom.c | 33 ++++++++++++++++----------------- 5 files changed, 65 insertions(+), 17 deletions(-) diff --git a/src/hvpp/hvpp/hvpp.cpp b/src/hvpp/hvpp/hvpp.cpp index d95e89d..3ee4324 100644 --- a/src/hvpp/hvpp/hvpp.cpp +++ b/src/hvpp/hvpp/hvpp.cpp @@ -394,6 +394,25 @@ HvppVcpuSuppressRipAdjust( vcpu_->suppress_rip_adjust(); } +PVOID +NTAPI +HvppVcpuGetUserData( + _In_ PVCPU Vcpu + ) +{ + return vcpu_->user_data(); +} + +VOID +NTAPI +HvppVcpuSetUserData( + _In_ PVCPU Vcpu, + _In_ PVOID UserData + ) +{ + vcpu_->user_data(UserData); +} + #pragma endregion ////////////////////////////////////////////////////////////////////////// diff --git a/src/hvpp/hvpp/hvpp.h b/src/hvpp/hvpp/hvpp.h index ea4d8bc..fde140e 100644 --- a/src/hvpp/hvpp/hvpp.h +++ b/src/hvpp/hvpp/hvpp.h @@ -1300,6 +1300,19 @@ HvppVcpuSuppressRipAdjust( _In_ PVCPU Vcpu ); +PVOID +NTAPI +HvppVcpuGetUserData( + _In_ PVCPU Vcpu + ); + +VOID +NTAPI +HvppVcpuSetUserData( + _In_ PVCPU Vcpu, + _In_ PVOID UserData + ); + #pragma endregion ////////////////////////////////////////////////////////////////////////// diff --git a/src/hvpp/hvpp/vcpu.cpp b/src/hvpp/hvpp/vcpu.cpp index 9f7695c..828188f 100644 --- a/src/hvpp/hvpp/vcpu.cpp +++ b/src/hvpp/hvpp/vcpu.cpp @@ -54,6 +54,8 @@ vcpu_t::vcpu_t(vmexit_handler& handler) noexcept , tsc_delta_previous_{} , tsc_delta_sum_{} + , user_data_{} + // // Well, this is also not necessary. // This member is reset to "false" on each VM-exit in entry_host() method. @@ -423,6 +425,16 @@ void vcpu_t::suppress_rip_adjust() noexcept suppress_rip_adjust_ = true; } +auto vcpu_t::user_data() noexcept -> void* +{ + return user_data_; +} + +void vcpu_t::user_data(void* new_data) noexcept +{ + user_data_ = new_data; +} + void vcpu_t::guest_resume() noexcept { resume_context_.rax = 1; diff --git a/src/hvpp/hvpp/vcpu.h b/src/hvpp/hvpp/vcpu.h index f330bea..b276e0c 100644 --- a/src/hvpp/hvpp/vcpu.h +++ b/src/hvpp/hvpp/vcpu.h @@ -42,6 +42,9 @@ class vcpu_t final auto context() noexcept -> context_t&; void suppress_rip_adjust() noexcept; + auto user_data() noexcept -> void*; + void user_data(void* data) noexcept; + // // Guest helper methods. 
// @@ -459,6 +462,8 @@ class vcpu_t final // interrupt_queue_t pending_interrupt_queue_[interrupt_queue_max]; + void* user_data_; + bool suppress_rip_adjust_; }; diff --git a/src/hvppdrv_c/vmexit_custom.c b/src/hvppdrv_c/vmexit_custom.c index 0b65864..fe492a6 100644 --- a/src/hvppdrv_c/vmexit_custom.c +++ b/src/hvppdrv_c/vmexit_custom.c @@ -14,7 +14,6 @@ typedef struct _PER_VCPU_DATA PHYSICAL_ADDRESS PageExec; } PER_VCPU_DATA, *PPER_VCPU_DATA; -PER_VCPU_DATA PerVcpuData[32]; VOID NTAPI @@ -50,7 +49,7 @@ HvppHandleExecuteVmcall( PVCPU_CONTEXT Context = HvppVcpuContext(Vcpu); PEPT Ept = HvppVcpuGetCurrentEpt(Vcpu); - PPER_VCPU_DATA Data = &PerVcpuData[KeGetCurrentProcessorNumberEx(NULL)]; + PPER_VCPU_DATA UserData = (PPER_VCPU_DATA)(HvppVcpuGetUserData(Vcpu)); switch (Context->Rcx) { @@ -58,22 +57,22 @@ HvppHandleExecuteVmcall( { ULONG_PTR Cr3; HvppAttachAddressSpace(&Cr3); - Data->PageRead = MmGetPhysicalAddress(Context->RdxAsPointer); - Data->PageExec = MmGetPhysicalAddress(Context->R8AsPointer); + UserData->PageRead = MmGetPhysicalAddress(Context->RdxAsPointer); + UserData->PageExec = MmGetPhysicalAddress(Context->R8AsPointer); HvppDetachAddressSpace(Cr3); } HvppTrace("vmcall (hook) EXEC: 0x%p READ: 0x%p", - Data->PageExec.QuadPart, - Data->PageRead.QuadPart); + UserData->PageExec.QuadPart, + UserData->PageRead.QuadPart); HvppEptSplit2MbTo4Kb(Ept, - EPT_PD_PAGE_ALIGN(Data->PageExec), - EPT_PD_PAGE_ALIGN(Data->PageExec)); + EPT_PD_PAGE_ALIGN(UserData->PageExec), + EPT_PD_PAGE_ALIGN(UserData->PageExec)); HvppEptMap4Kb(Ept, - Data->PageExec, - Data->PageExec, + UserData->PageExec, + UserData->PageExec, EPT_ACCESS_EXECUTE); HvppInveptSingleContext(HvppEptGetEptPointer(Ept)); @@ -83,8 +82,8 @@ HvppHandleExecuteVmcall( HvppTrace("vmcall (unhook)"); HvppEptJoin4KbTo2Mb(Ept, - EPT_PD_PAGE_ALIGN(Data->PageExec), - EPT_PD_PAGE_ALIGN(Data->PageExec)); + EPT_PD_PAGE_ALIGN(UserData->PageExec), + EPT_PD_PAGE_ALIGN(UserData->PageExec)); HvppInveptSingleContext(HvppEptGetEptPointer(Ept)); break; @@ -114,7 +113,7 @@ HvppHandleEptViolation( PEPT Ept = HvppVcpuGetCurrentEpt(Vcpu); - PPER_VCPU_DATA Data = &PerVcpuData[KeGetCurrentProcessorNumberEx(NULL)]; + PPER_VCPU_DATA UserData = (PPER_VCPU_DATA)(HvppVcpuGetUserData(Vcpu)); if (EptViolation.DataRead || EptViolation.DataWrite) { @@ -123,8 +122,8 @@ HvppHandleEptViolation( GuestPhysicalAddress.QuadPart); HvppEptMap4Kb(Ept, - Data->PageExec, - Data->PageRead, + UserData->PageExec, + UserData->PageRead, EPT_ACCESS_READ_WRITE); } else if (EptViolation.DataExecute) @@ -134,8 +133,8 @@ HvppHandleEptViolation( GuestPhysicalAddress.QuadPart); HvppEptMap4Kb(Ept, - Data->PageExec, - Data->PageExec, + UserData->PageExec, + UserData->PageExec, EPT_ACCESS_EXECUTE); }
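
Usage note: the diff above removes the global PerVcpuData[32] array from hvppdrv_c, but none of the five files shown calls HvppVcpuSetUserData(), so the per-VCPU structure has to be allocated and registered somewhere else before HvppHandleExecuteVmcall() and HvppHandleEptViolation() start dereferencing HvppVcpuGetUserData(). A minimal sketch of that wiring follows. Only HvppVcpuGetUserData(), HvppVcpuSetUserData() and the PER_VCPU_DATA layout come from this patch; the routine names, the pool tag, and the assumption that the driver runs a per-VCPU setup/teardown callback are illustrative, not part of hvpp or hvppdrv_c.

    //
    // Hypothetical per-VCPU user-data wiring (sketch, not from the patch).
    // PER_VCPU_DATA mirrors the struct kept in vmexit_custom.c so the
    // sketch is self-contained.
    //

    #include <ntddk.h>
    #include "hvpp.h"

    typedef struct _PER_VCPU_DATA
    {
      PHYSICAL_ADDRESS PageRead;
      PHYSICAL_ADDRESS PageExec;
    } PER_VCPU_DATA, *PPER_VCPU_DATA;

    NTSTATUS
    NTAPI
    VmExitSetupPerVcpuData(           // assumed setup hook, called once per VCPU
      _In_ PVCPU Vcpu                 // before VM-exit handlers are active
      )
    {
      //
      // One NonPagedPool allocation per VCPU, zero-initialized so the
      // vmcall (hook) path starts from empty PageRead/PageExec entries.
      //
      PPER_VCPU_DATA UserData = (PPER_VCPU_DATA)ExAllocatePoolWithTag(
        NonPagedPool, sizeof(PER_VCPU_DATA), 'DcvH');

      if (!UserData)
      {
        return STATUS_INSUFFICIENT_RESOURCES;
      }

      RtlZeroMemory(UserData, sizeof(PER_VCPU_DATA));

      //
      // Hand the pointer to the hypervisor; the VM-exit handlers retrieve
      // it later via HvppVcpuGetUserData(Vcpu).
      //
      HvppVcpuSetUserData(Vcpu, UserData);

      return STATUS_SUCCESS;
    }

    VOID
    NTAPI
    VmExitTeardownPerVcpuData(        // assumed teardown hook, mirrors setup
      _In_ PVCPU Vcpu
      )
    {
      //
      // Fetch the pointer back from the VCPU and release it.  Clearing the
      // slot afterwards keeps a stray VM-exit from touching freed memory.
      //
      PPER_VCPU_DATA UserData = (PPER_VCPU_DATA)HvppVcpuGetUserData(Vcpu);

      if (UserData)
      {
        ExFreePoolWithTag(UserData, 'DcvH');
        HvppVcpuSetUserData(Vcpu, NULL);
      }
    }

Keeping the data behind the VCPU instead of indexing a global array by KeGetCurrentProcessorNumberEx() also sidesteps any question of what that call returns in host (VM-exit) context; the handler simply asks the VCPU it was given for its own data.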