From d8eb913052df7d0ba377af6dd1c347fa00984e77 Mon Sep 17 00:00:00 2001 From: Petr Benes Date: Thu, 1 Aug 2019 18:58:23 +0200 Subject: [PATCH] introduce new flexible memory allocator Previous "custom" allocator was non-replaceable and hard-coded. The "custom allocator" term has been replaced with "hypervisor allocator", as it more closely reflects when is this allocator used. Most importantly, this change allows anyone to write their own allocator/heap manager (or use existing one, such as dlmalloc) and bind it as the hypervisor allocator. --- src/hvpp/hvpp.vcxproj | 7 +- src/hvpp/hvpp.vcxproj.filters | 30 +- src/hvpp/hvpp/hvpp.cpp | 15 + src/hvpp/hvpp/hypervisor.cpp | 25 +- src/hvpp/hvpp/lib/driver.cpp | 215 +++++++-- src/hvpp/hvpp/lib/driver.h | 8 + src/hvpp/hvpp/lib/mm.cpp | 434 ++---------------- src/hvpp/hvpp/lib/mm.h | 78 ++-- src/hvpp/hvpp/lib/mm/memory_allocator.h | 30 ++ .../hypervisor_memory_allocator.cpp | 389 ++++++++++++++++ .../hypervisor_memory_allocator.h | 49 ++ .../system_memory_allocator.cpp | 54 +++ .../system_memory_allocator.h | 22 + .../win32/system_memory_allocator.cpp | 52 +++ src/hvpp/hvpp/lib/win32/mm.cpp | 28 -- src/hvppdrv/main.cpp | 2 +- 16 files changed, 927 insertions(+), 511 deletions(-) create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator.h create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.cpp create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.h create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.cpp create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.h create mode 100644 src/hvpp/hvpp/lib/mm/memory_allocator/win32/system_memory_allocator.cpp delete mode 100644 src/hvpp/hvpp/lib/win32/mm.cpp diff --git a/src/hvpp/hvpp.vcxproj b/src/hvpp/hvpp.vcxproj index 89ee08b..a787f54 100644 --- a/src/hvpp/hvpp.vcxproj +++ b/src/hvpp/hvpp.vcxproj @@ -104,6 +104,9 @@ + + + @@ -128,7 +131,6 @@ - false @@ -146,6 +148,9 @@ + + + diff --git a/src/hvpp/hvpp.vcxproj.filters b/src/hvpp/hvpp.vcxproj.filters index c322ec0..09f9eba 100644 --- a/src/hvpp/hvpp.vcxproj.filters +++ b/src/hvpp/hvpp.vcxproj.filters @@ -76,6 +76,15 @@ {c53a048f-d7da-48c6-88a5-fa1ed9bfc1ed} + + {feefc873-6b26-444a-a474-21d23ab84328} + + + {63686d7b-f1d9-4d9b-85af-efede73a8e9f} + + + {095769d8-145a-47c6-abfa-d8a363b91f9b} + @@ -114,9 +123,6 @@ Source Files\hvpp\lib - - Source Files\hvpp\lib\win32 - Source Files\hvpp\lib @@ -162,6 +168,15 @@ Source Files\hvpp\lib\mm\win32 + + Source Files\hvpp\lib\mm\memory_allocator + + + Source Files\hvpp\lib\mm\memory_allocator + + + Source Files\hvpp\lib\mm\memory_allocator\win32 + @@ -344,6 +359,15 @@ Header Files\hvpp\lib + + Header Files\hvpp\lib\mm\memory_allocator + + + Header Files\hvpp\lib\mm\memory_allocator + + + Header Files\hvpp\lib\mm + diff --git a/src/hvpp/hvpp/hvpp.cpp b/src/hvpp/hvpp/hvpp.cpp index f2c9a59..a7bf521 100644 --- a/src/hvpp/hvpp/hvpp.cpp +++ b/src/hvpp/hvpp/hvpp.cpp @@ -218,6 +218,19 @@ HvppInitialize( return STATUS_INSUFFICIENT_RESOURCES; } + if (auto err = driver::common::system_allocator_default_initialize()) + { + driver::common::destroy(); + return STATUS_INSUFFICIENT_RESOURCES; + } + + if (auto err = driver::common::hypervisor_allocator_default_initialize()) + { + driver::common::system_allocator_default_destroy(); + driver::common::destroy(); + return STATUS_INSUFFICIENT_RESOURCES; + } + return STATUS_SUCCESS; } @@ -232,6 +245,8 @@ HvppDestroy( // driver::common::destroy(); + 
driver::common::hypervisor_allocator_default_destroy(); + driver::common::system_allocator_default_destroy(); } NTSTATUS diff --git a/src/hvpp/hvpp/hypervisor.cpp b/src/hvpp/hvpp/hypervisor.cpp index ceb967a..992c35d 100644 --- a/src/hvpp/hvpp/hypervisor.cpp +++ b/src/hvpp/hvpp/hypervisor.cpp @@ -3,6 +3,7 @@ #include "ia32/cpuid/cpuid_eax_01.h" #include "lib/assert.h" +#include "lib/driver.h" #include "lib/log.h" #include "lib/mm.h" #include "lib/mp.h" @@ -102,6 +103,28 @@ namespace hvpp::hypervisor vcpu_t(handler); }); + // + // Check if hypervisor-allocator has been set. + // + // If the driver is calling `hypervisor::start()' from + // the `driver::initialize()' method without manually + // setting the allocator, then the allocator will be empty. + // + if (!mm::hypervisor_allocator()) + { + // + // Hypervisor-allocator has not been set - create default one. + // + // Note: + // Default hypervisor allocator is automatically destroyed + // in the `driver::common::destroy()' function. + // + if (auto err = driver::common::hypervisor_allocator_default_initialize()) + { + return err; + } + } + // // Check that CPU supports all required features to // run this hypervisor. @@ -160,7 +183,7 @@ namespace hvpp::hypervisor // Destroy array of VCPUs. // std::destroy_n(global.vcpu_list, mp::cpu_count()); - delete global.vcpu_list; + delete static_cast(global.vcpu_list); global.vcpu_list = nullptr; diff --git a/src/hvpp/hvpp/lib/driver.cpp b/src/hvpp/hvpp/lib/driver.cpp index a1c7be1..9faaca8 100644 --- a/src/hvpp/hvpp/lib/driver.cpp +++ b/src/hvpp/hvpp/lib/driver.cpp @@ -3,17 +3,22 @@ #include "assert.h" #include "mm.h" #include "mp.h" +#include "object.h" #include "log.h" #include namespace driver::common { - void* system_memory_ = nullptr; - size_t system_memory_size_ = 0; + static driver_initialize_fn driver_initialize_; + static driver_destroy_fn driver_destroy_; - driver_initialize_fn driver_initialize_; - driver_destroy_fn driver_destroy_; + static object_t system_memory_allocator_object_; + static object_t hypervisor_memory_allocator_object_; + + static bool has_default_hypervisor_allocator_ = false; + static void* hypervisor_allocator_base_address_ = nullptr; + static size_t hypervisor_allocator_capacity_ = 0; auto initialize( @@ -21,9 +26,6 @@ namespace driver::common driver_destroy_fn driver_destroy ) noexcept -> error_code_t { - hvpp_assert(system_memory_ == nullptr); - hvpp_assert(system_memory_size_ == 0); - // // Either both must be set or both must be nullptr, // nothing else. @@ -54,59 +56,46 @@ namespace driver::common mm::paging_descriptor().dump(); // - // Estimate required memory size. - // If hypervisor begins to run out of memory, required_memory_size - // is the right variable to adjust. + // Initialize default system allocator. // - // Default required memory size is 34MB per CPU. - // - const auto required_memory_size = ( - // - // Estimated EPT size: - // Make space for 2MB EPT entries for 512 GB of the physical - // memory. Each EPT entry has 8 bytes. - // 512GB / 2MB * 8 = 256kb * 8 = 2MB per CPU. - // - ((512ull * 1024 * 1024 * 1024) / (2ull * 1024 * 1024) * 8) - - + - - // - // Additional 32MB per CPU. - // - (32ull * 1024 * 1024) - ) * mp::cpu_count(); + if (auto err = system_allocator_default_initialize()) + { + return err; + } // - // Round up to page boundary. + // If driver doesn't have initialize() function, we're finished. 
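//
// Example (sketch): a driver can bind its own hypervisor allocator from
// its initialize() callback, in which case `hypervisor::start()' skips
// the default path above.  `my_driver_initialize' and `my_allocator'
// are hypothetical names; the include paths are illustrative.
//
// #include "hvpp/lib/driver.h"
// #include "hvpp/lib/mm.h"
//
static mm::hypervisor_memory_allocator my_allocator;

static auto my_driver_initialize() noexcept -> error_code_t
{
  //
  // Back the allocator with system memory, sized by the same heuristic
  // the default path uses.
  //
  const auto capacity = driver::common::hypervisor_allocator_recommended_capacity();
  const auto base     = mm::system_allocator()->allocate(capacity);

  if (!base)
  {
    return make_error_code_t(std::errc::not_enough_memory);
  }

  if (auto err = my_allocator.attach(base, capacity))
  {
    return err;
  }

  //
  // Register the allocator - from now on `mm::hypervisor_allocator()'
  // is non-null and no default hypervisor allocator is constructed.
  //
  mm::hypervisor_allocator(&my_allocator);

  return {};
}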
// - system_memory_size_ = ia32::round_to_pages(required_memory_size); - - hvpp_info("Number of processors: %u", mp::cpu_count()); - hvpp_info("Reserved memory: %" PRIu64 " MB", - system_memory_size_ / 1024 / 1024); + if (!driver_initialize_) + { + return {}; + } // - // Allocate memory. + // ...otherwise, call the provided initialize() function. // - system_memory_ = mm::system_allocate(required_memory_size); + if (auto err = driver_initialize_()) + { + return err; + } - if (!system_memory_) + // + // Check if hypervisor allocator has been set. + // + if (mm::hypervisor_allocator()) { - return make_error_code_t(std::errc::not_enough_memory); + return {}; } // - // Assign allocated memory to the memory manager. + // ...if not, create default hypervisor allocator. // - if (auto err = mm::assign(system_memory_, system_memory_size_)) + if (auto err = hypervisor_allocator_default_initialize()) { return err; } - return driver_initialize_ - ? driver_initialize_() - : error_code_t{}; + return {}; } void @@ -127,11 +116,145 @@ namespace driver::common logger::destroy(); // - // Return allocated memory back to the system. + // Destroy default hypervisor allocator (if constructed). + // + if (has_default_hypervisor_allocator_) + { + hypervisor_allocator_default_destroy(); + } + + // + // At last, destroy default system allocator. + // + system_allocator_default_destroy(); + } + + auto system_allocator_default_initialize() noexcept -> error_code_t + { + // + // Construct and assign system allocator object. + // + system_memory_allocator_object_.initialize(); + mm::system_allocator(&*system_memory_allocator_object_); + + return {}; + } + + void system_allocator_default_destroy() noexcept + { + // + // Unassign system allocator and destroy the object. + // + mm::system_allocator(nullptr); + system_memory_allocator_object_.destroy(); + } + + auto hypervisor_allocator_default_initialize() noexcept -> error_code_t + { + hvpp_assert(hypervisor_allocator_base_address_ == nullptr); + hvpp_assert(hypervisor_allocator_capacity_ == 0); + + // + // Construct hypervisor allocator object. + // + hypervisor_memory_allocator_object_.initialize(); + + hypervisor_allocator_capacity_ = hypervisor_allocator_recommended_capacity(); + + hvpp_info("Number of processors: %u", mp::cpu_count()); + hvpp_info("Reserved memory: %" PRIu64 " MB", + hypervisor_allocator_capacity_ / 1024 / 1024); + + // + // Allocate memory. // - if (system_memory_) + hypervisor_allocator_base_address_ = mm::system_allocator()->allocate(hypervisor_allocator_capacity_); + + if (!hypervisor_allocator_base_address_) { - mm::system_free(system_memory_); + return make_error_code_t(std::errc::not_enough_memory); } + + // + // Attach allocated memory. + // + if (auto err = hypervisor_memory_allocator_object_->attach(hypervisor_allocator_base_address_, hypervisor_allocator_capacity_)) + { + return err; + } + + // + // Assign allocator. + // + mm::hypervisor_allocator(&*hypervisor_memory_allocator_object_); + + has_default_hypervisor_allocator_ = true; + + return {}; + } + + void hypervisor_allocator_default_destroy() noexcept + { + if (!hypervisor_allocator_base_address_) + { + return; + } + + if (!mm::hypervisor_allocator()) + { + return; + } + + // + // Unassign allocator. + // + mm::hypervisor_allocator(nullptr); + + // + // Detach allocated memory. + // + hypervisor_memory_allocator_object_->detach(); + + // + // Destroy object. + // + hypervisor_memory_allocator_object_.destroy(); + + // + // Return allocated memory back to the system. 
+ // + mm::system_allocator()->free(hypervisor_allocator_base_address_); + } + + auto hypervisor_allocator_recommended_capacity() noexcept -> size_t + { + // + // Estimate required memory size. + // If hypervisor begins to run out of memory, required_memory_size + // is the right variable to adjust. + // + // Default required memory size is 34MB per CPU. + // + const auto recommended_memory_size = ( + // + // Estimated EPT size: + // Make space for 2MB EPT entries for 512 GB of the physical + // memory. Each EPT entry has 8 bytes. + // 512GB / 2MB * 8 = 256kb * 8 = 2MB per CPU. + // + ((512ull * 1024 * 1024 * 1024) / (2ull * 1024 * 1024) * 8) + + + + + // + // Additional 32MB per CPU. + // + (32ull * 1024 * 1024) + ) * mp::cpu_count(); + + // + // Round up to page boundary. + // + return ia32::round_to_pages(recommended_memory_size); } } diff --git a/src/hvpp/hvpp/lib/driver.h b/src/hvpp/hvpp/lib/driver.h index b2b2a18..16c363b 100644 --- a/src/hvpp/hvpp/lib/driver.h +++ b/src/hvpp/hvpp/lib/driver.h @@ -17,6 +17,14 @@ namespace driver ) noexcept -> error_code_t; void destroy() noexcept; + + auto hypervisor_allocator_recommended_capacity() noexcept -> size_t; + + auto system_allocator_default_initialize() noexcept -> error_code_t; + void system_allocator_default_destroy() noexcept; + + auto hypervisor_allocator_default_initialize() noexcept -> error_code_t; + void hypervisor_allocator_default_destroy() noexcept; } auto initialize() noexcept -> error_code_t; diff --git a/src/hvpp/hvpp/lib/mm.cpp b/src/hvpp/hvpp/lib/mm.cpp index 92bc6f9..9992a61 100644 --- a/src/hvpp/hvpp/lib/mm.cpp +++ b/src/hvpp/hvpp/lib/mm.cpp @@ -1,90 +1,32 @@ #include "mm.h" -#include "hvpp/ia32/memory.h" -#include "hvpp/config.h" - #include "assert.h" -#include "bitmap.h" -#include "object.h" -#include "spinlock.h" #include "mp.h" - -#include -#include -#include - -// -// Simple memory manager implementation. -// -// Because in VM-exits it is very dangerous to call OS -// functions for memory (de)allocation (they can cause -// IPIs and/or TLB flush), the hypervisor has its own -// simple memory manager. The memory manager should be -// the very first thing to initialize. -// -// Memory manager is provided memory space on which it -// can operate. Small part from this space is reserved -// for the page bitmap and page allocation map. -// -// Page bitmap sets bit 1 at page offset, if the page is -// allocated (e.g.: if 4th page (at base_address + 4*PAGE_SIZE) -// is allocated, 4th bit in this bitmap is set). -// On deallocation, corresponding bit is reset to 0. -// -// Page allocation map stores number of pages allocated -// for the particular address (e.g.: allocate(8192) returned -// (base_address + 4*PAGE_SIZE), which is 2 pages, therefore -// page_allocation_map[4] == 2. -// On deallocation, corresponding number in the map is reset -// to 0. -// -// Note: allocations are always page-aligned - therefore -// allocation for even 1 byte results in waste of -// 4096 bytes. 
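//
// Worked example (sketch) of the capacity heuristic above: the EPT
// reservation is 512 GB / 2 MB * 8 bytes = 2 MB per CPU, plus 32 MB of
// general-purpose pool per CPU, i.e. 34 MB per CPU before page
// rounding.  The CPU count below is hypothetical.
//
constexpr auto ept_bytes_per_cpu  = (512ull * 1024 * 1024 * 1024)
                                  / (  2ull * 1024 * 1024) * 8;
constexpr auto pool_bytes_per_cpu = 32ull * 1024 * 1024;
constexpr auto bytes_per_cpu      = ept_bytes_per_cpu + pool_bytes_per_cpu;

static_assert(ept_bytes_per_cpu == 2ull  * 1024 * 1024);   //   2 MB
static_assert(bytes_per_cpu     == 34ull * 1024 * 1024);   //  34 MB

//
// E.g. on a machine with 8 logical processors:
//
static_assert(bytes_per_cpu * 8 == 272ull * 1024 * 1024);  // 272 MB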
-// +#include "../config.h" namespace mm { - using pgbmp_t = object_t>; - using pgmap_t = uint16_t; - struct global_t { - uint8_t* base_address; // Pool base address - size_t available_size; // Available memory in the pool - - pgbmp_t page_bitmap; // Bitmap holding used pages - int page_bitmap_buffer_size; // + memory_allocator* allocator[HVPP_MAX_CPU]; - pgmap_t* page_allocation_map; // Map holding number of allocated pages - int page_allocation_map_size; // - - int last_page_offset; // Last returned page offset - used as hint - - size_t allocated_bytes; - size_t free_bytes; - - allocator_t allocator[HVPP_MAX_CPU]; + memory_allocator* system_allocator; + memory_allocator* custom_allocator; object_t paging_descriptor; object_t physical_memory_descriptor; object_t mtrr_descriptor; - - object_t lock; }; global_t global; - const allocator_t system_allocator = { &system_allocate, &system_free }; - const allocator_t custom_allocator = { &allocate, &free }; - allocator_guard::allocator_guard() noexcept - : allocator_guard(custom_allocator) + : allocator_guard(global.custom_allocator) { } - allocator_guard::allocator_guard(const allocator_t& new_allocator) noexcept + allocator_guard::allocator_guard(memory_allocator* new_allocator) noexcept : previous_allocator_(allocator()) { allocator(new_allocator); @@ -98,25 +40,13 @@ namespace mm auto initialize() noexcept -> error_code_t { // - // Initialize physical memory descriptor and MTRRs. + // Initialize paging descriptor, physical memory descriptor + // and MTRR descriptor. // global.paging_descriptor.initialize(); global.physical_memory_descriptor.initialize(); global.mtrr_descriptor.initialize(); - // - // Initialize lock. - // - global.lock.initialize(); - - // - // Set system allocator by default. - // - for (auto& allocator_item : global.allocator) - { - allocator_item = system_allocator; - } - return {}; } @@ -124,327 +54,45 @@ namespace mm { // // Destroy all objects. - // Note that this method doesn't acquire the lock and - // assumes all allocations has been already freed. // global.mtrr_descriptor.destroy(); global.physical_memory_descriptor.destroy(); global.paging_descriptor.destroy(); - global.lock.destroy(); - - // - // If no memory has been assigned - leave. - // - if (!global.base_address) - { - return; - } - - // - // Mark memory of page_bitmap and page_allocation_map - // as freed. - // - // Note that everything "free" does is clear bits in - // page_bitmap and sets 0 to particular page_allocation_map - // items. - // - // These two calls are needed to assure that the next two - // asserts below will pass. - // - free(global.page_bitmap->buffer()); - free(global.page_allocation_map); - - // - // Checks for memory leaks. - // - hvpp_assert(global.page_bitmap->all_clear()); - - // - // Checks for allocator corruption. - // - hvpp_assert(std::all_of( - global.page_allocation_map, - global.page_allocation_map + global.page_allocation_map_size / sizeof(pgmap_t), - [](auto page_count) { return page_count == 0; })); - - global.base_address = nullptr; - global.available_size = 0; - - global.page_bitmap.destroy(); - global.page_bitmap_buffer_size = 0; - - global.page_allocation_map = nullptr; - global.page_allocation_map_size = 0; - - global.last_page_offset = 0; - global.allocated_bytes = 0; - global.free_bytes = 0; - } - - auto assign(void* address, size_t size) noexcept -> error_code_t - { - if (size < page_size * 3) - { - // - // We need at least 3 pages (see explanation below). 
- // - hvpp_assert(0); - return make_error_code_t(std::errc::invalid_argument); - } - - // - // If the provided address is not page aligned, align it - // to the next page. - // - if (byte_offset(address) != 0) - { - const auto lost_bytes = byte_offset(address); - - address = reinterpret_cast(page_align(address)) + page_size; - - // - // Subtract amount of "lost" bytes due to alignment. - // - size -= lost_bytes; - } - - // - // Align size to the page boundary. - // - size = page_align(size); - - // - // Check again. - // - if (size < page_size * 3) - { - hvpp_assert(0); - return make_error_code_t(std::errc::invalid_argument); - } - - // - // Address is page-aligned, size is page-aligned, and all - // requirements are met. Proceed with initialization. - // - - // - // The provided memory is split up to 3 parts: - // 1. page bitmap - stores information if page is allocated - // or not - // 2. page count - stores information how many consecutive - // pages has been allocated - // 3. memory pool - this is the memory which will be provided - // - // For (1), there is taken (size / PAGE_SIZE / 8) bytes from the - // provided memory space. - // For (2), there is taken (size / PAGE_SIZE * sizeof(pgmap_t)) - // bytes from the provided memory space. - // The rest memory is used for (3). - // - // This should account for ~93% of the provided memory space (if - // it is big enough, e.g.: 32MB). - // - - // - // Construct the page bitmap. - // - uint8_t* page_bitmap_buffer = reinterpret_cast(address); - global.page_bitmap_buffer_size = static_cast(round_to_pages(size / page_size / 8)); - memset(page_bitmap_buffer, 0, global.page_bitmap_buffer_size); - - int page_bitmap_size_in_bits = static_cast(size / page_size); - global.page_bitmap.initialize(page_bitmap_buffer, page_bitmap_size_in_bits); - - // - // Construct the page allocation map. - // - global.page_allocation_map = reinterpret_cast(page_bitmap_buffer + global.page_bitmap_buffer_size); - global.page_allocation_map_size = static_cast(round_to_pages(size / page_size) * sizeof(pgmap_t)); - memset(global.page_allocation_map, 0, global.page_allocation_map_size); - - // - // Compute available memory. - // - global.base_address = reinterpret_cast(address); - global.available_size = size; - - // - // Mark memory of page_bitmap and page_allocation_map as allocated. - // The return value of these allocations should return the exact - // address of page_bitmap_buffer and page_allocation_map. - // - void* page_bitmap_buffer_tmp = allocate(global.page_bitmap_buffer_size); - void* page_allocation_map_tmp = allocate(global.page_allocation_map_size); - - hvpp_assert(reinterpret_cast( page_bitmap_buffer) == reinterpret_cast(page_bitmap_buffer_tmp)); - hvpp_assert(reinterpret_cast(global.page_allocation_map) == reinterpret_cast(page_allocation_map_tmp)); - - (void)(page_bitmap_buffer_tmp); - (void)(page_allocation_map_tmp); - - // - // Initialize memory pool with garbage. - // This should help with debugging uninitialized variables - // and class members. - // - const auto reserved_bytes = static_cast(global.page_bitmap_buffer_size + global.page_allocation_map_size); - memset(global.base_address + reserved_bytes, 0xcc, size - reserved_bytes); - - // - // Set initial values of allocated/free bytes. 
- // - global.allocated_bytes = 0; - global.free_bytes = size; - - return {}; } - auto allocate(size_t size) noexcept -> void* + auto system_allocator() noexcept -> memory_allocator* { - hvpp_assert(global.base_address != nullptr && global.available_size > 0); - - // - // Return at least 1 page, even if someone required 0. - // - if (size == 0) - { - hvpp_assert(0); - size = 1; - } - - int page_count = static_cast(bytes_to_pages(size)); - - // - // Check if the desired number of pages can fit into the - // allocation map. - // - if (page_count > std::numeric_limits::max() - 1) - { - hvpp_assert(0); - return nullptr; - } - - int previous_page_offset; - - { - std::lock_guard _{ *global.lock }; - - global.last_page_offset = global.page_bitmap->find_first_clear(global.last_page_offset, page_count); - - if (global.last_page_offset == -1) - { - global.last_page_offset = 0; - global.last_page_offset = global.page_bitmap->find_first_clear(global.last_page_offset, page_count); - - if (global.last_page_offset == -1) - { - // - // Not enough memory... - // - hvpp_assert(0); - return nullptr; - } - } - - global.page_bitmap->set(global.last_page_offset, page_count); - global.page_allocation_map[global.last_page_offset] = static_cast(page_count); - - previous_page_offset = global.last_page_offset; - global.last_page_offset += page_count; - - global.allocated_bytes += page_count * page_size; - global.free_bytes -= page_count * page_size; - } - - // - // Return the final address. - // Note that we're not under lock here - we don't need it, because - // everything neccessary has been done (bitmap + page allocation map - // manipulation). - // - return global.base_address + previous_page_offset * page_size; + return global.system_allocator; } - void free(void* address) noexcept + void system_allocator(memory_allocator* new_allocator) noexcept { - // - // Our allocator always provides page-aligned memory. - // - hvpp_assert(byte_offset(address) == 0); + global.system_allocator = new_allocator; - const auto offset = static_cast(bytes_to_pages(reinterpret_cast(address) - global.base_address)); - - if (address == nullptr) - { - // - // Return immediatelly if we're trying to free NULL. - // - // hvpp_assert(0); - return; - } - - if (size_t(offset) * page_size > global.available_size) - { - // - // We don't own this memory. - // - hvpp_assert(0); - return; - } - - std::lock_guard _{ *global.lock }; - - if (global.page_allocation_map[offset] == 0) + for (auto& allocator_item : global.allocator) { - // - // This memory wasn't allocated. - // - hvpp_assert(0); - return; + allocator_item = global.system_allocator; } - - // - // Clear number of allocated pages. - // - const auto page_count = static_cast(global.page_allocation_map[offset]); - global.page_allocation_map[offset] = 0; - - // - // Clear pages in the bitmap. 
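//
// Usage sketch: on the VM-exit path it is unsafe to call OS allocation
// routines, so a handler switches the current CPU to the hypervisor
// allocator for its own scope.  `handle_some_exit' is a hypothetical
// name, and the guard is assumed to restore the previous allocator in
// its destructor, as the constructor above suggests.
//
static void handle_some_exit() noexcept
{
  //
  // Default-constructed guard selects the hypervisor allocator
  // for the current CPU.
  //
  mm::allocator_guard _;

  //
  // Served by the per-CPU allocator via `generic_allocate()'; the
  // matching `delete' is routed by `generic_free()' through
  // `contains()' to whichever allocator owns the pointer.
  //
  auto buffer = new uint8_t[128];
  delete[] buffer;
}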
- // - global.page_bitmap->clear(offset, page_count); - - global.allocated_bytes -= page_count * page_size; - global.free_bytes += page_count * page_size; } - auto system_allocate(size_t size) noexcept -> void* + auto hypervisor_allocator() noexcept -> memory_allocator* { - return detail::system_allocate(size); + return global.custom_allocator; } - void system_free(void* address) noexcept + void hypervisor_allocator(memory_allocator* new_allocator) noexcept { - detail::system_free(address); + global.custom_allocator = new_allocator; } - auto allocated_bytes() noexcept -> size_t - { - return global.allocated_bytes; - } - - auto free_bytes() noexcept -> size_t - { - return global.free_bytes; - } - - auto allocator() noexcept -> const allocator_t& + auto allocator() noexcept -> memory_allocator* { return global.allocator[mp::cpu_index()]; } - void allocator(const allocator_t& new_allocator) noexcept + void allocator(memory_allocator* new_allocator) noexcept { + hvpp_assert(new_allocator); global.allocator[mp::cpu_index()] = new_allocator; } @@ -468,28 +116,32 @@ namespace detail { void* generic_allocate(size_t size) noexcept { - return mm::global.allocator[mp::cpu_index()].allocate(size); + return mm::global.allocator[mp::cpu_index()]->allocate(size); + } + + void* generic_allocate_aligned(size_t size, std::align_val_t alignment) noexcept + { + return mm::global.allocator[mp::cpu_index()]->allocate_aligned(size, static_cast(alignment)); } void generic_free(void* address) noexcept { - reinterpret_cast(address) >= mm::global.base_address && - reinterpret_cast(address) < mm::global.base_address + mm::global.available_size - ? mm::free (address) - : mm::system_free(address); + return mm::hypervisor_allocator()->contains(address) + ? mm::hypervisor_allocator()->free(address) + : mm::system_allocator()->free(address); } } -void* operator new (size_t size) { return detail::generic_allocate(size); } -void* operator new[](size_t size) { return detail::generic_allocate(size); } -void* operator new (size_t size, std::align_val_t) { return detail::generic_allocate(size); } -void* operator new[](size_t size, std::align_val_t) { return detail::generic_allocate(size); } - -void operator delete (void* address) { detail::generic_free(address); } -void operator delete[](void* address) { detail::generic_free(address); } -void operator delete[](void* address, std::size_t) { detail::generic_free(address); } -void operator delete (void* address, std::size_t) { detail::generic_free(address); } -void operator delete (void* address, std::align_val_t) { detail::generic_free(address); } -void operator delete[](void* address, std::align_val_t) { detail::generic_free(address); } -void operator delete[](void* address, std::size_t, std::align_val_t) { detail::generic_free(address); } -void operator delete (void* address, std::size_t, std::align_val_t) { detail::generic_free(address); } +void* operator new (size_t size) { return detail::generic_allocate (size); } +void* operator new[](size_t size) { return detail::generic_allocate (size); } +void* operator new (size_t size, std::align_val_t alignment) { return detail::generic_allocate_aligned(size, alignment); } +void* operator new[](size_t size, std::align_val_t alignment) { return detail::generic_allocate_aligned(size, alignment); } + +void operator delete (void* address) { detail::generic_free(address); } +void operator delete[](void* address) { detail::generic_free(address); } +void operator delete[](void* address, std::size_t) { detail::generic_free(address); } +void 
operator delete (void* address, std::size_t) { detail::generic_free(address); } +void operator delete (void* address, std::align_val_t) { detail::generic_free(address); } +void operator delete[](void* address, std::align_val_t) { detail::generic_free(address); } +void operator delete[](void* address, std::size_t, std::align_val_t) { detail::generic_free(address); } +void operator delete (void* address, std::size_t, std::align_val_t) { detail::generic_free(address); } diff --git a/src/hvpp/hvpp/lib/mm.h b/src/hvpp/hvpp/lib/mm.h index 861c147..fce7c95 100644 --- a/src/hvpp/hvpp/lib/mm.h +++ b/src/hvpp/hvpp/lib/mm.h @@ -3,6 +3,9 @@ #include "error.h" +#include "mm/memory_allocator.h" +#include "mm/memory_allocator/system_memory_allocator.h" +#include "mm/memory_allocator/hypervisor_memory_allocator.h" #include "mm/paging_descriptor.h" #include "mm/physical_memory_descriptor.h" #include "mm/mtrr_descriptor.h" @@ -11,58 +14,53 @@ namespace mm { - namespace detail - { - auto system_allocate(size_t size) noexcept -> void*; - void system_free(void* address) noexcept; - } - - using allocate_fn_t = void*(*)(size_t); - using free_fn_t = void(*)(void*); + // + // Initialize & destroy. + // + auto initialize() noexcept -> error_code_t; + void destroy() noexcept; - struct allocator_t - { - allocate_fn_t allocate; - free_fn_t free; - }; + // + // System allocator. + // + auto system_allocator() noexcept -> memory_allocator*; + void system_allocator(memory_allocator* new_allocator) noexcept; + + // + // Hypervisor allocator. + // + auto hypervisor_allocator() noexcept -> memory_allocator*; + void hypervisor_allocator(memory_allocator* new_allocator) noexcept; + + // + // Current allocator. + // + auto allocator() noexcept -> memory_allocator*; + void allocator(memory_allocator* new_allocator) noexcept; + + // + // Descriptor getters. + // + auto paging_descriptor() noexcept -> const paging_descriptor_t&; + auto physical_memory_descriptor() noexcept -> const physical_memory_descriptor_t&; + auto mtrr_descriptor() noexcept -> const mtrr_descriptor_t&; + // + // Allocator guard. 
+ // class allocator_guard { public: allocator_guard() noexcept; allocator_guard(const allocator_guard& other) noexcept = delete; allocator_guard(allocator_guard&& other) noexcept = delete; - allocator_guard(const allocator_t& new_allocator) noexcept; + allocator_guard(memory_allocator* new_allocator) noexcept; ~allocator_guard() noexcept; - allocator_guard& operator=(const allocator_guard& other) noexcept = delete; + allocator_guard& operator=(allocator_guard& other) noexcept = delete; allocator_guard& operator=(allocator_guard&& other) noexcept = delete; private: - allocator_t previous_allocator_; + memory_allocator* previous_allocator_; }; - - extern const allocator_t system_allocator; - extern const allocator_t custom_allocator; - - auto initialize() noexcept -> error_code_t; - void destroy() noexcept; - - auto assign(void* address, size_t size) noexcept -> error_code_t; - - auto allocate(size_t size) noexcept -> void*; - void free(void* address) noexcept; - - auto system_allocate(size_t size) noexcept -> void*; - void system_free(void* address) noexcept; - - auto allocated_bytes() noexcept -> size_t; - auto free_bytes() noexcept -> size_t; - - auto allocator() noexcept -> const allocator_t&; - void allocator(const allocator_t& new_allocator) noexcept; - - auto paging_descriptor() noexcept -> const paging_descriptor_t&; - auto physical_memory_descriptor() noexcept -> const physical_memory_descriptor_t&; - auto mtrr_descriptor() noexcept -> const mtrr_descriptor_t&; } diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator.h b/src/hvpp/hvpp/lib/mm/memory_allocator.h new file mode 100644 index 0000000..185ac12 --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator.h @@ -0,0 +1,30 @@ +#pragma once +#include // size_t + +#include "../error.h" + +namespace mm +{ + class memory_allocator + { + public: + memory_allocator() noexcept = default; + memory_allocator(const memory_allocator& other) noexcept = delete; + memory_allocator(memory_allocator&& other) noexcept = delete; + memory_allocator& operator=(const memory_allocator& other) noexcept = delete; + memory_allocator& operator=(memory_allocator&& other) noexcept = delete; + virtual ~memory_allocator() noexcept = default; + + virtual auto attach(void* address, size_t size) noexcept -> error_code_t = 0; + virtual void detach() noexcept = 0; + + virtual auto allocate(size_t size) noexcept -> void* = 0; + virtual auto allocate_aligned(size_t size, size_t alignment) noexcept -> void* = 0; + virtual void free(void* address) noexcept = 0; + + virtual bool contains(void* address) noexcept = 0; + + virtual auto allocated_bytes() noexcept -> size_t = 0; + virtual auto free_bytes() noexcept -> size_t = 0; + }; +} diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.cpp b/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.cpp new file mode 100644 index 0000000..b549210 --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.cpp @@ -0,0 +1,389 @@ +#include "hypervisor_memory_allocator.h" + +#include "../../assert.h" +#include "../../../config.h" +#include "../../../ia32/memory.h" + +#include +#include +#include + +// +// Simple memory manager implementation. +// +// Because in VM-exits it is very dangerous to call OS +// functions for memory (de)allocation (they can cause +// IPIs and/or TLB flush), the hypervisor has its own +// simple memory manager. The memory manager should be +// the very first thing to initialize. 
+// +// Memory manager is provided memory space on which it +// can operate. Small part from this space is reserved +// for the page bitmap and page allocation map. +// +// Page bitmap sets bit 1 at page offset, if the page is +// allocated (e.g.: if 4th page (at base_address + 4*PAGE_SIZE) +// is allocated, 4th bit in this bitmap is set). +// On deallocation, corresponding bit is reset to 0. +// +// Page allocation map stores number of pages allocated +// for the particular address (e.g.: allocate(8192) returned +// (base_address + 4*PAGE_SIZE), which is 2 pages, therefore +// page_allocation_map[4] == 2. +// On deallocation, corresponding number in the map is reset +// to 0. +// +// Note: allocations are always page-aligned - therefore +// allocation for even 1 byte results in waste of +// 4096 bytes. +// + +namespace mm +{ + using namespace ia32; + + hypervisor_memory_allocator::hypervisor_memory_allocator() noexcept + : base_address_{} + , capacity_{} + , page_bitmap_{} + , page_bitmap_buffer_size_{} + , page_allocation_map_{} + , page_allocation_map_size_{} + , last_page_offset_{} + , allocated_bytes_{} + , free_bytes_{} + , lock_{} + { + + } + + hypervisor_memory_allocator::~hypervisor_memory_allocator() noexcept + { + + } + + auto hypervisor_memory_allocator::attach(void* address, size_t size) noexcept -> error_code_t + { + if (size < page_size * 3) + { + // + // We need at least 3 pages (see explanation below). + // + hvpp_assert(0); + return make_error_code_t(std::errc::invalid_argument); + } + + // + // If the provided address is not page aligned, align it + // to the next page. + // + if (byte_offset(address) != 0) + { + const auto lost_bytes = byte_offset(address); + + address = reinterpret_cast(page_align(address)) + page_size; + + // + // Subtract amount of "lost" bytes due to alignment. + // + size -= lost_bytes; + } + + // + // Align size to the page boundary. + // + size = page_align(size); + + // + // Check again. + // + if (size < page_size * 3) + { + hvpp_assert(0); + return make_error_code_t(std::errc::invalid_argument); + } + + // + // Address is page-aligned, size is page-aligned, and all + // requirements are met. Proceed with initialization. + // + + // + // The provided memory is split up to 3 parts: + // 1. page bitmap - stores information if page is allocated + // or not + // 2. page count - stores information how many consecutive + // pages has been allocated + // 3. memory pool - this is the memory which will be provided + // + // For (1), there is taken (size / PAGE_SIZE / 8) bytes from the + // provided memory space. + // For (2), there is taken (size / PAGE_SIZE * sizeof(pgmap_t)) + // bytes from the provided memory space. + // The rest memory is used for (3). + // + // This should account for ~93% of the provided memory space (if + // it is big enough, e.g.: 32MB). + // + + // + // Construct the page bitmap. + // + auto page_bitmap_buffer = reinterpret_cast(address); + page_bitmap_buffer_size_ = static_cast(round_to_pages(size / page_size / 8)); + memset(page_bitmap_buffer, 0, page_bitmap_buffer_size_); + + auto page_bitmap_size_in_bits = static_cast(size / page_size); + page_bitmap_ = pgbmp_t(page_bitmap_buffer, page_bitmap_size_in_bits); + + // + // Construct the page allocation map. 
+ // + page_allocation_map_ = reinterpret_cast(page_bitmap_buffer + page_bitmap_buffer_size_); + page_allocation_map_size_ = static_cast(round_to_pages(size / page_size) * sizeof(pgmap_t)); + memset(page_allocation_map_, 0, page_allocation_map_size_); + + // + // Compute available memory. + // + base_address_ = reinterpret_cast(address); + capacity_ = size; + + // + // Mark memory of page_bitmap and page_allocation_map as allocated. + // The return value of these allocations should return the exact + // address of page_bitmap_buffer and page_allocation_map. + // + auto page_bitmap_buffer_tmp = allocate(page_bitmap_buffer_size_); + auto page_allocation_map_tmp = allocate(page_allocation_map_size_); + + hvpp_assert(reinterpret_cast(page_bitmap_buffer) == reinterpret_cast(page_bitmap_buffer_tmp)); + hvpp_assert(reinterpret_cast(page_allocation_map_) == reinterpret_cast(page_allocation_map_tmp)); + + (void)(page_bitmap_buffer_tmp); + (void)(page_allocation_map_tmp); + + // + // Initialize memory pool with garbage. + // This should help with debugging uninitialized variables + // and class members. + // + const auto reserved_bytes = static_cast(page_bitmap_buffer_size_ + page_allocation_map_size_); + memset(base_address_ + reserved_bytes, 0xcc, size - reserved_bytes); + + // + // Set initial values of allocated/free bytes. + // + allocated_bytes_ = 0; + free_bytes_ = size; + + return {}; + } + + void hypervisor_memory_allocator::detach() noexcept + { + // + // If no memory has been assigned - leave. + // + if (!base_address_) + { + return; + } + + // + // Note that this method doesn't acquire the lock and + // assumes all allocations has been already freed. + // + + // + // Mark memory of page_bitmap and page_allocation_map + // as freed. + // + // Note that everything "free" does is clear bits in + // page_bitmap and sets 0 to particular page_allocation_map + // items. + // + // These two calls are needed to assure that the next two + // asserts below will pass. + // + free(page_bitmap_.buffer()); + free(page_allocation_map_); + + // + // Checks for memory leaks. + // + hvpp_assert(page_bitmap_.all_clear()); + + // + // Checks for allocator corruption. + // + hvpp_assert(std::all_of( + page_allocation_map_, + page_allocation_map_ + page_allocation_map_size_ / sizeof(pgmap_t), + [](auto page_count) { return page_count == 0; })); + + base_address_ = nullptr; + capacity_ = 0; + + page_bitmap_buffer_size_ = 0; + + page_allocation_map_ = nullptr; + page_allocation_map_size_ = 0; + + last_page_offset_ = 0; + allocated_bytes_ = 0; + free_bytes_ = 0; + } + + auto hypervisor_memory_allocator::allocate(size_t size) noexcept -> void* + { + hvpp_assert(base_address_ != nullptr && capacity_ > 0); + + // + // Return at least 1 page, even if someone required 0. + // + if (size == 0) + { + hvpp_assert(0); + size = 1; + } + + auto page_count = static_cast(bytes_to_pages(size)); + + // + // Check if the desired number of pages can fit into the + // allocation map. + // + if (page_count > std::numeric_limits::max() - 1) + { + hvpp_assert(0); + return nullptr; + } + + int previous_page_offset; + + { + std::lock_guard _{ lock_ }; + + last_page_offset_ = page_bitmap_.find_first_clear(last_page_offset_, page_count); + + if (last_page_offset_ == -1) + { + last_page_offset_ = 0; + last_page_offset_ = page_bitmap_.find_first_clear(last_page_offset_, page_count); + + if (last_page_offset_ == -1) + { + // + // Not enough memory... 
+ // + hvpp_assert(0); + return nullptr; + } + } + + page_bitmap_.set(last_page_offset_, page_count); + page_allocation_map_[last_page_offset_] = static_cast(page_count); + + previous_page_offset = last_page_offset_; + last_page_offset_ += page_count; + + allocated_bytes_ += page_count * page_size; + free_bytes_ -= page_count * page_size; + } + + // + // Return the final address. + // Note that we're not under lock here - we don't need it, because + // everything neccessary has been done (bitmap + page allocation map + // manipulation). + // + return base_address_ + previous_page_offset * page_size; + } + + auto hypervisor_memory_allocator::allocate_aligned(size_t size, size_t alignment) noexcept -> void* + { + // + // Our allocator always returns page-aligned memory. + // Therefore all alignments that are in interval (0, 4096] + // AND are power of 2 are valid. + // + hvpp_assert( + alignment > 0 && + alignment <= 4096 && + !(alignment & (alignment - 1)) // is power of 2 + ); + + return allocate(size); + } + + void hypervisor_memory_allocator::free(void* address) noexcept + { + // + // Our allocator always provides page-aligned memory. + // + hvpp_assert(byte_offset(address) == 0); + + if (address == nullptr) + { + // + // Return immediatelly if we're trying to free NULL. + // + // hvpp_assert(0); + return; + } + + const auto offset = static_cast(bytes_to_pages(reinterpret_cast(address) - base_address_)); + + if (size_t(offset) * page_size > capacity_) + { + // + // We don't own this memory. + // + hvpp_assert(0); + return; + } + + std::lock_guard _{ lock_ }; + + if (page_allocation_map_[offset] == 0) + { + // + // This memory wasn't allocated. + // + hvpp_assert(0); + return; + } + + // + // Clear number of allocated pages. + // + const auto page_count = static_cast(page_allocation_map_[offset]); + page_allocation_map_[offset] = 0; + + // + // Clear pages in the bitmap. 
+ // + page_bitmap_.clear(offset, page_count); + + allocated_bytes_ -= page_count * page_size; + free_bytes_ += page_count * page_size; + } + + bool hypervisor_memory_allocator::contains(void* address) noexcept + { + return + reinterpret_cast(address) >= base_address_ && + reinterpret_cast(address) < base_address_ + capacity_; + } + + auto hypervisor_memory_allocator::allocated_bytes() noexcept -> size_t + { + return allocated_bytes_; + } + + auto hypervisor_memory_allocator::free_bytes() noexcept -> size_t + { + return free_bytes_; + } +} diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.h b/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.h new file mode 100644 index 0000000..83e0600 --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator/hypervisor_memory_allocator.h @@ -0,0 +1,49 @@ +#pragma once +#include "../memory_allocator.h" + +#include "../../bitmap.h" +#include "../../object.h" +#include "../../spinlock.h" + +namespace mm +{ + class hypervisor_memory_allocator + : public memory_allocator + { + public: + hypervisor_memory_allocator() noexcept; + ~hypervisor_memory_allocator() noexcept override; + + auto attach(void* address, size_t size) noexcept -> error_code_t override; + void detach() noexcept override; + + auto allocate(size_t size) noexcept -> void* override; + auto allocate_aligned(size_t size, size_t alignment) noexcept -> void* override; + void free(void* address) noexcept override; + + bool contains(void* address) noexcept override; + + auto allocated_bytes() noexcept -> size_t override; + auto free_bytes() noexcept -> size_t override; + + private: + using pgbmp_t = bitmap<>; + using pgmap_t = uint16_t; + + uint8_t* base_address_; // Pool base address + size_t capacity_; // Capacity of the pool + + pgbmp_t page_bitmap_; // Bitmap holding used pages + int page_bitmap_buffer_size_; // + + pgmap_t* page_allocation_map_; // Map holding number of allocated pages + int page_allocation_map_size_; // + + int last_page_offset_; // Last returned page offset - used as hint + + size_t allocated_bytes_; + size_t free_bytes_; + + spinlock lock_; + }; +} diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.cpp b/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.cpp new file mode 100644 index 0000000..fbccef6 --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.cpp @@ -0,0 +1,54 @@ +#include "system_memory_allocator.h" + +namespace mm +{ + namespace detail + { + auto system_allocate(size_t size) noexcept -> void*; + auto system_allocate_aligned(size_t size, size_t alignment) noexcept -> void*; + void system_free(void* address) noexcept; + } + + auto system_memory_allocator::attach(void* address, size_t size) noexcept -> error_code_t + { + (void)(address); + (void)(size); + return make_error_code_t(std::errc::not_supported); + } + + void system_memory_allocator::detach() noexcept + { + return; + } + + auto system_memory_allocator::allocate(size_t size) noexcept -> void* + { + return detail::system_allocate(size); + } + + auto system_memory_allocator::allocate_aligned(size_t size, size_t alignment) noexcept -> void* + { + return detail::system_allocate_aligned(size, alignment); + } + + void system_memory_allocator::free(void* address) noexcept + { + detail::system_free(address); + } + + bool system_memory_allocator::contains(void* address) noexcept + { + (void)(address); + return true; + } + + auto system_memory_allocator::allocated_bytes() noexcept -> size_t + { + return 0; + } 
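//
// Sketch: the same interface makes it possible to plug in an existing
// heap manager (e.g. dlmalloc) instead of the bitmap allocator.  This
// assumes dlmalloc compiled with MSPACES (its malloc.h providing
// create_mspace_with_base/mspace_malloc/mspace_memalign/mspace_free is
// included); the class below is illustrative and not part of hvpp.
//
class dlmalloc_memory_allocator
  : public mm::memory_allocator
{
  public:
    auto attach(void* address, size_t size) noexcept -> error_code_t override
    {
      base_ = reinterpret_cast<uint8_t*>(address);
      size_ = size;
      msp_  = create_mspace_with_base(address, size, 0 /* locked */);

      return msp_
        ? error_code_t{}
        : make_error_code_t(std::errc::not_enough_memory);
    }

    void detach() noexcept override
    {
      destroy_mspace(msp_);
      msp_  = nullptr;
      base_ = nullptr;
      size_ = 0;
    }

    auto allocate(size_t size) noexcept -> void* override
    { return mspace_malloc(msp_, size); }

    auto allocate_aligned(size_t size, size_t alignment) noexcept -> void* override
    { return mspace_memalign(msp_, alignment, size); }

    void free(void* address) noexcept override
    { if (address) mspace_free(msp_, address); }

    bool contains(void* address) noexcept override
    {
      return reinterpret_cast<uint8_t*>(address) >= base_
          && reinterpret_cast<uint8_t*>(address) <  base_ + size_;
    }

    //
    // dlmalloc keeps its own bookkeeping (mspace_mallinfo); returning 0
    // here mirrors system_memory_allocator.
    //
    auto allocated_bytes() noexcept -> size_t override { return 0; }
    auto free_bytes() noexcept -> size_t override { return 0; }

  private:
    mspace   msp_{};
    uint8_t* base_{};
    size_t   size_{};
};

//
// An instance would then be attached to system-allocated memory and
// bound with `mm::hypervisor_allocator()', exactly like the default
// allocator in driver.cpp above.
//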
+ + auto system_memory_allocator::free_bytes() noexcept -> size_t + { + return 0; + } +} diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.h b/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.h new file mode 100644 index 0000000..3290a4a --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator/system_memory_allocator.h @@ -0,0 +1,22 @@ +#pragma once +#include "../memory_allocator.h" + +namespace mm +{ + class system_memory_allocator + : public memory_allocator + { + public: + auto attach(void* address, size_t size) noexcept -> error_code_t override; + void detach() noexcept override; + + auto allocate(size_t size) noexcept -> void* override; + auto allocate_aligned(size_t size, size_t alignment) noexcept -> void* override; + void free(void* address) noexcept override; + + bool contains(void* address) noexcept override; + + auto allocated_bytes() noexcept -> size_t override; + auto free_bytes() noexcept -> size_t override; + }; +} diff --git a/src/hvpp/hvpp/lib/mm/memory_allocator/win32/system_memory_allocator.cpp b/src/hvpp/hvpp/lib/mm/memory_allocator/win32/system_memory_allocator.cpp new file mode 100644 index 0000000..43cc239 --- /dev/null +++ b/src/hvpp/hvpp/lib/mm/memory_allocator/win32/system_memory_allocator.cpp @@ -0,0 +1,52 @@ +#include + +#define HVPP_MEMORY_TAG 'ppvh' + +namespace mm::detail +{ + auto system_allocate(size_t size) noexcept -> void* + { + return ExAllocatePoolWithTag(NonPagedPool, size, HVPP_MEMORY_TAG); + } + + auto system_allocate_aligned(size_t size, size_t alignment) noexcept -> void* + { + // + // If NumberOfBytes is PAGE_SIZE or greater, a page-aligned buffer is allocated. + // Memory allocations of PAGE_SIZE or less are allocated within a page and do not + // cross page boundaries. Memory allocations of less than PAGE_SIZE are not + // necessarily page-aligned but are aligned to 8-byte boundaries in 32-bit systems + // and to 16-byte boundaries in 64-bit systems. + // + // (ref: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/nf-wdm-exallocatepoolwithtag) + + NT_ASSERT( + alignment > 0 && + alignment <= 4096 && + !(alignment & (alignment - 1)) // is power of 2 + ); + + if (size >= PAGE_SIZE || alignment <= sizeof(void*)) + { + return system_allocate(size); + } + + // + // Force alignment to PAGE_SIZE. + // + return system_allocate(PAGE_SIZE); + } + + void system_free(void* address) noexcept + { + // + // ExFreePoolWithTag doesn't support freeing NULL. + // + if (address == nullptr) + { + return; + } + + ExFreePoolWithTag(address, HVPP_MEMORY_TAG); + } +} diff --git a/src/hvpp/hvpp/lib/win32/mm.cpp b/src/hvpp/hvpp/lib/win32/mm.cpp deleted file mode 100644 index 62be811..0000000 --- a/src/hvpp/hvpp/lib/win32/mm.cpp +++ /dev/null @@ -1,28 +0,0 @@ -#include "../mm.h" - -#include - -#define HVPP_MEMORY_TAG 'ppvh' - -namespace mm::detail -{ - auto system_allocate(size_t size) noexcept -> void* - { - return ExAllocatePoolWithTag(NonPagedPool, - size, - HVPP_MEMORY_TAG); - } - - void system_free(void* address) noexcept - { - // - // ExFreePoolWithTag doesn't support freeing NULL. - // - if (address == nullptr) - { - return; - } - - ExFreePoolWithTag(address, HVPP_MEMORY_TAG); - } -} diff --git a/src/hvppdrv/main.cpp b/src/hvppdrv/main.cpp index 2d9dc0c..3e337ec 100644 --- a/src/hvppdrv/main.cpp +++ b/src/hvppdrv/main.cpp @@ -87,7 +87,7 @@ namespace driver // Tell debugger we're started. 
// hvpp_info("Hypervisor started, current free memory: %" PRIu64 " MB", - mm::free_bytes() / 1024 / 1024); + mm::hypervisor_allocator()->free_bytes() / 1024 / 1024); return {}; }