Enable direct access to multiple heaps (theseus-os#286)
* Moved the multiple heaps into a static so that they can be directly accessed, rather than through the global allocator.

* Updated heap evaluations to directly use the multiple heaps when the configuration option "direct_access_to_multiple_heaps" is enabled.
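For readers new to this pattern, here is a minimal, self-contained sketch of what the commit enables. `MultipleHeaps` is stubbed out and the `main` harness is hypothetical; the `Once` type, its `try()` accessor, and the shape of `DEFAULT_ALLOCATOR` mirror the diff below (spin 0.4, as pinned in `kernel/heap/Cargo.toml`):

```rust
use core::alloc::{GlobalAlloc, Layout};
use spin::Once;

/// Stand-in for Theseus's real `MultipleHeaps` type; only the access
/// pattern is the point of this sketch.
struct MultipleHeaps;

unsafe impl GlobalAlloc for MultipleHeaps {
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        core::ptr::null_mut() // a real implementation would hand out memory
    }
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}

// Before this commit, the instance was a private field of the global
// allocator, reachable only through `GlobalAlloc` dispatch. After it, the
// instance lives in a public `Once` static and can also be used directly.
static DEFAULT_ALLOCATOR: Once<Box<dyn GlobalAlloc + Send + Sync>> = Once::new();

fn main() {
    DEFAULT_ALLOCATOR.call_once(|| Box::new(MultipleHeaps));

    // One `Once` lookup (spin 0.4's `try()`), then direct calls on the heaps.
    if let Some(allocator) = DEFAULT_ALLOCATOR.try() {
        let layout = Layout::from_size_align(8, 8).unwrap();
        unsafe {
            let ptr = allocator.alloc(layout);
            if !ptr.is_null() {
                allocator.dealloc(ptr, layout);
            }
        }
    }
}
```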
Ramla-I authored and kevinaboos committed May 7, 2020
1 parent 9133d03 commit d3b01d3
Showing 7 changed files with 139 additions and 36 deletions.
59 changes: 58 additions & 1 deletion applications/test_heap/src/lib.rs
@@ -16,15 +16,23 @@ extern crate heap;
 use alloc::{
     string::{String, ToString},
     vec::Vec,
-    collections::{BTreeSet,BTreeMap}
+    collections::{BTreeSet,BTreeMap},
 };
+#[cfg(direct_access_to_multiple_heaps)]
+use alloc::alloc::Layout;
 use hpet::get_hpet;
 use hashbrown::HashMap;
 use qp_trie::{Trie, wrapper::BString};
 use getopts::{Matches, Options};
 use libtest::*;
 use core::sync::atomic::{AtomicUsize, Ordering};

+#[cfg(not(direct_access_to_multiple_heaps))]
+use heap::GLOBAL_ALLOCATOR as ALLOCATOR;
+
+#[cfg(direct_access_to_multiple_heaps)]
+use heap::DEFAULT_ALLOCATOR as ALLOCATOR;
+
 mod threadtest;
 use threadtest::{OBJSIZE, LARGE_SIZE, do_threadtest};

@@ -133,6 +141,55 @@ fn rmain(matches: Matches) -> Result<(), &'static str> {
     Ok(())
 }

+#[cfg(direct_access_to_multiple_heaps)]
+/// Returns the overhead, in hpet ticks, of accessing the multiple heaps through their `Once` wrapper.
+fn overhead_of_accessing_multiple_heaps() -> Result<u64, &'static str> {
+    const TRIES: u64 = 100;
+    let mut tries: u64 = 0;
+    let mut max: u64 = core::u64::MIN;
+    let mut min: u64 = core::u64::MAX;
+
+    for _ in 0..TRIES {
+        let overhead = overhead_of_accessing_multiple_heaps_inner()?;
+        tries += overhead;
+        if overhead > max { max = overhead; }
+        if overhead < min { min = overhead; }
+    }
+
+    let overhead = tries / TRIES;
+    let err = (overhead * 10 + overhead * THRESHOLD_ERROR_RATIO) / 10;
+    if max - overhead > err || overhead - min > err {
+        warn!("overhead_of_accessing_multiple_heaps diff is too big: {} ({} - {}) ctr", max - min, max, min);
+    }
+    Ok(overhead)
+}
+
+#[cfg(direct_access_to_multiple_heaps)]
+/// Internal function that actually measures the overhead of accessing the multiple heaps.
+/// It accesses the multiple heaps only once, because a loop over many iterations would be optimized away.
+/// Returns the value in hpet ticks.
+fn overhead_of_accessing_multiple_heaps_inner() -> Result<u64, &'static str> {
+    let hpet = get_hpet();
+    let start = hpet.as_ref().ok_or("couldn't get HPET timer")?.get_counter();
+
+    let allocator = match ALLOCATOR.try() {
+        Some(allocator) => allocator,
+        None => {
+            error!("Multiple heaps not initialized!");
+            return Err("Multiple heaps not initialized!");
+        }
+    };
+
+    let end = hpet.as_ref().ok_or("couldn't get HPET timer")?.get_counter();
+
+    // Use the allocator once so the lookup above cannot be optimized out.
+    let layout = Layout::from_size_align(8, 8).unwrap();
+    let ptr = unsafe { allocator.alloc(layout) };
+    unsafe { allocator.dealloc(ptr, layout) };
+
+    Ok(end - start)
+}
+

 fn do_vec() {
     let tries = TRIES;
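One note on `overhead_of_accessing_multiple_heaps_inner` above: it times a single access because a loop of otherwise-unused lookups can be deleted wholesale by the optimizer. An alternative sketch, assuming a toolchain where the `core::hint::black_box` optimization barrier is available (it was nightly-only at the time of this commit), would let such a loop survive. It reuses the `DEFAULT_ALLOCATOR` static from the sketch near the top of this page:

```rust
use core::hint::black_box; // optimization barrier; assumed available

/// Hypothetical variant: perform `n` lookups of the `Once`-wrapped static
/// without the loop body being hoisted or removed by the optimizer.
fn count_initialized_lookups(n: u64) -> u64 {
    let mut initialized = 0u64;
    for _ in 0..n {
        // `black_box` makes the compiler treat the result as observed, so
        // the side-effect-free `try()` call cannot be optimized away.
        if black_box(DEFAULT_ALLOCATOR.try()).is_some() {
            initialized += 1;
        }
    }
    black_box(initialized)
}
```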
39 changes: 31 additions & 8 deletions applications/test_heap/src/shbench.rs
@@ -6,14 +6,17 @@
 use alloc::{
     vec::Vec,
     string::String,
-    alloc::{GlobalAlloc, Layout},
+    alloc::Layout,
 };
+#[cfg(not(direct_access_to_multiple_heaps))]
+use alloc::alloc::GlobalAlloc;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::ptr;
 use hpet::get_hpet;
 use libtest::hpet_2_ns;
-use heap::ALLOCATOR;
-use crate::NTHREADS;
+use crate::{NTHREADS, ALLOCATOR};
+#[cfg(direct_access_to_multiple_heaps)]
+use crate::overhead_of_accessing_multiple_heaps;

 pub(crate) static NITERATIONS: AtomicUsize = AtomicUsize::new(1000);
 pub(crate) static MAX_BLOCK_SIZE: AtomicUsize = AtomicUsize::new(MAX_REGULAR);
@@ -36,6 +39,12 @@ pub fn do_shbench() -> Result<(), &'static str> {
     let hpet = get_hpet();
     println!("Running shbench for {} threads, {} total iterations, {} iterations per thread, {} max block size, {} min block size ...",
         nthreads, niterations, niterations/nthreads, MAX_BLOCK_SIZE.load(Ordering::SeqCst), MIN_BLOCK_SIZE.load(Ordering::SeqCst));
+
+    #[cfg(direct_access_to_multiple_heaps)]
+    {
+        let overhead = overhead_of_accessing_multiple_heaps()?;
+        println!("Overhead of accessing multiple heaps is: {} ticks, {} ns", overhead, hpet_2_ns(overhead));
+    }

     let start = hpet.as_ref().ok_or("couldn't get HPET timer")?.get_counter();

@@ -56,6 +65,21 @@ pub fn do_shbench() -> Result<(), &'static str> {


 fn worker(_:()) {
+    #[cfg(not(direct_access_to_multiple_heaps))]
+    let allocator = &ALLOCATOR;
+
+    // When directly accessing the multiple heaps, we still have to go through their `Once` wrapper,
+    // but doing so once at the start of each thread is insignificant compared to the number of
+    // iterations we run. That one-time cost is also printed above.
+    #[cfg(direct_access_to_multiple_heaps)]
+    let allocator = match ALLOCATOR.try() {
+        Some(allocator) => allocator,
+        None => {
+            error!("Multiple heaps not initialized!");
+            return;
+        }
+    };
+
     let nthreads = NTHREADS.load(Ordering::SeqCst);
     let niterations = NITERATIONS.load(Ordering::SeqCst) / nthreads;
     // the total number of allocations that will be stored at one time
@@ -94,7 +118,7 @@ fn worker(_:()) {

     for _ in 0..iterations {
         let layout = Layout::from_size_align(size, 2).unwrap();
-        let ptr = unsafe { ALLOCATOR.alloc(layout) };
+        let ptr = unsafe { allocator.alloc(layout) };

         if ptr.is_null() {
             error!("Out of Heap Memory");
@@ -124,14 +148,14 @@ fn worker(_:()) {
         }
         // free the top part of the buffer, the oldest allocations first
         while mp < save_start {
-            unsafe { ALLOCATOR.dealloc(allocations[mp], layouts[mp]); }
+            unsafe { allocator.dealloc(allocations[mp], layouts[mp]); }
             mp += 1;
         }
         mp = mpe;
         // free the end of the buffer, the newest allocations first
         while mp > save_end {
             mp -= 1;
-            unsafe { ALLOCATOR.dealloc(allocations[mp], layouts[mp]); }
+            unsafe { allocator.dealloc(allocations[mp], layouts[mp]); }
         }
         mp = 0;
     }
@@ -147,9 +171,8 @@ fn worker(_:()) {
     mp = 0;

     while mp < mpe {
-        unsafe { ALLOCATOR.dealloc(allocations[mp], layouts[mp]); }
+        unsafe { allocator.dealloc(allocations[mp], layouts[mp]); }
     }
 }
-

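The `worker` changes above carry the commit's main design point: the `Once` lookup is hoisted out of the hot loop and paid once per thread, and both configurations then share one loop body under a single `allocator` binding. A condensed sketch of that idiom (not standalone: `ALLOCATOR` and the cfg name come from the surrounding file, the loop bound is a placeholder, and the `error!` handling is elided):

```rust
fn worker(_: ()) {
    // Global-allocator build: `allocator` is the `Heap` static itself,
    // used through its `GlobalAlloc` impl.
    #[cfg(not(direct_access_to_multiple_heaps))]
    let allocator = &ALLOCATOR;

    // Direct-access build: one `Once` lookup per thread, then the concrete
    // multiple-heaps allocator is called directly inside the loop.
    #[cfg(direct_access_to_multiple_heaps)]
    let allocator = match ALLOCATOR.try() {
        Some(allocator) => allocator,
        None => return, // heaps not initialized yet
    };

    // Identical hot loop in both builds.
    let layout = Layout::from_size_align(8, 8).unwrap();
    for _ in 0..1_000_000 {
        unsafe {
            let ptr = allocator.alloc(layout);
            if !ptr.is_null() {
                allocator.dealloc(ptr, layout);
            }
        }
    }
}
```

Both arms type-check against the same loop because `&Heap` and the boxed multiple heaps each expose `alloc`/`dealloc` through `GlobalAlloc`.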
34 changes: 29 additions & 5 deletions applications/test_heap/src/threadtest.rs
@@ -6,14 +6,17 @@
 use alloc::{
     vec::Vec,
     string::String,
-    alloc::{GlobalAlloc, Layout}
+    alloc::Layout
 };
+#[cfg(not(direct_access_to_multiple_heaps))]
+use alloc::alloc::GlobalAlloc;
 use core::sync::atomic::{Ordering, AtomicUsize};
 use core::ptr;
 use hpet::get_hpet;
 use libtest::hpet_2_ns;
-use crate::NTHREADS;
-use heap::ALLOCATOR;
+use crate::{NTHREADS, ALLOCATOR};
+#[cfg(direct_access_to_multiple_heaps)]
+use crate::overhead_of_accessing_multiple_heaps;


 pub(crate) static NITERATIONS: AtomicUsize = AtomicUsize::new(50);
@@ -34,6 +37,12 @@ pub fn do_threadtest() -> Result<(), &'static str> {
println!("Running threadtest for {} threads, {} iterations, {} total objects, {} obj size ...",
nthreads, NITERATIONS.load(Ordering::SeqCst), NOBJECTS.load(Ordering::SeqCst), OBJSIZE.load(Ordering::SeqCst));

#[cfg(direct_access_to_multiple_heaps)]
{
let overhead = overhead_of_accessing_multiple_heaps()?;
println!("Overhead of accessing multiple heaps is: {} ticks, {} ns", overhead, hpet_2_ns(overhead));
}

let start = hpet.as_ref().ok_or("couldn't get HPET timer")?.get_counter();

for _ in 0..nthreads {
@@ -53,6 +62,21 @@


 fn worker(_:()) {
+    #[cfg(not(direct_access_to_multiple_heaps))]
+    let allocator = &ALLOCATOR;
+
+    // When directly accessing the multiple heaps, we still have to go through their `Once` wrapper,
+    // but doing so once at the start of each thread is insignificant compared to the number of
+    // iterations we run. That one-time cost is also printed above.
+    #[cfg(direct_access_to_multiple_heaps)]
+    let allocator = match ALLOCATOR.try() {
+        Some(allocator) => allocator,
+        None => {
+            error!("Multiple heaps not initialized!");
+            return;
+        }
+    };
+
     let niterations = NITERATIONS.load(Ordering::SeqCst);
     let nobjects = NOBJECTS.load(Ordering::SeqCst);
     let nthreads = NTHREADS.load(Ordering::SeqCst);
@@ -67,11 +91,11 @@ fn worker(_:()) {

     for _ in 0..niterations {
         for i in 0..(nobjects/nthreads) {
-            let ptr = unsafe { ALLOCATOR.alloc(layout) };
+            let ptr = unsafe { allocator.alloc(layout) };
             allocations[i] = ptr;
         }
         for i in 0..(nobjects/nthreads) {
-            unsafe { ALLOCATOR.dealloc(allocations[i], layout); }
+            unsafe { allocator.dealloc(allocations[i], layout); }
         }
     }
 }
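Why the once-per-thread binding matters for threadtest specifically: with a per-call lookup, every `alloc` and `dealloc` in the loops above would pay the `Once` overhead printed at the start of the run. A toy estimate mirroring the loop structure (all inputs hypothetical):

```rust
/// Rough per-thread cost that per-call `Once` lookups would add, given a
/// measured lookup overhead in ns: one alloc and one dealloc per object
/// per iteration.
fn added_lookup_cost_ns(lookup_ns: u64, niterations: u64, nobjects: u64, nthreads: u64) -> u64 {
    let lookups_per_thread = 2 * niterations * (nobjects / nthreads);
    lookups_per_thread * lookup_ns
}
```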
4 changes: 1 addition & 3 deletions kernel/heap/Cargo.toml
@@ -3,6 +3,7 @@ authors = ["Ramla Ijaz <[email protected]>"]
name = "heap"
description = "global allocator for the system"
version = "0.1.0"
build = "../../build.rs"

[dependencies]
spin = "0.4.10"
@@ -21,6 +22,3 @@ path = "../kernel_config"

 [dependencies.block_allocator]
 path = "../block_allocator"
-
-
-
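The new `build = "../../build.rs"` line is what makes `#[cfg(direct_access_to_multiple_heaps)]` resolvable at all: a cfg option that is not a Cargo feature has to be emitted by a build script. A hypothetical sketch of such a script, assuming a `THESEUS_CONFIG`-style environment variable (the details of Theseus's shared build.rs may differ):

```rust
// build.rs (sketch): turn whitespace-separated entries of an environment
// variable, e.g. THESEUS_CONFIG="direct_access_to_multiple_heaps", into
// `--cfg` flags for rustc.
use std::env;

fn main() {
    // Rebuild when the configuration changes.
    println!("cargo:rerun-if-env-changed=THESEUS_CONFIG");
    if let Ok(config) = env::var("THESEUS_CONFIG") {
        for entry in config.split_whitespace() {
            println!("cargo:rustc-cfg={}", entry);
        }
    }
}
```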
33 changes: 19 additions & 14 deletions kernel/heap/src/lib.rs
@@ -24,7 +24,18 @@ use block_allocator::FixedSizeBlockAllocator;


 #[global_allocator]
-pub static ALLOCATOR: Heap = Heap::empty();
+pub static GLOBAL_ALLOCATOR: Heap = Heap::empty();
+
+#[cfg(direct_access_to_multiple_heaps)]
+/// The default allocator is the one which is set up after basic system initialization is completed.
+/// Currently it is initialized with an instance of `MultipleHeaps`.
+/// It is made public only so that it can be used explicitly, without going through the global allocator.
+pub static DEFAULT_ALLOCATOR: Once<Box<dyn GlobalAlloc + Send + Sync>> = Once::new();
+
+#[cfg(not(direct_access_to_multiple_heaps))]
+/// The default allocator is the one which is set up after basic system initialization is completed.
+/// Currently it is initialized with an instance of `MultipleHeaps`.
+static DEFAULT_ALLOCATOR: Once<Box<dyn GlobalAlloc + Send + Sync>> = Once::new();

 /// The heap mapped pages should be writable
 pub const HEAP_FLAGS: EntryFlags = EntryFlags::WRITABLE;
@@ -35,43 +46,37 @@ const INITIAL_HEAP_END_ADDR: usize = KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZ

 /// Initializes the single heap, which is the first heap used by the system.
 pub fn init_single_heap(start_virt_addr: usize, size_in_bytes: usize) {
-    unsafe { ALLOCATOR.initial_allocator.lock().init(start_virt_addr, size_in_bytes); }
+    unsafe { GLOBAL_ALLOCATOR.initial_allocator.lock().init(start_virt_addr, size_in_bytes); }
 }


-/// Sets a new default allocator for the global heap. It will start being used after this function is called.
+/// Sets a new default allocator to be used by the global heap. It will start being used after this function is called.
 pub fn set_allocator(allocator: Box<dyn GlobalAlloc + Send + Sync>) {
-    ALLOCATOR.set_allocator(allocator);
+    DEFAULT_ALLOCATOR.call_once(|| allocator);
 }


 /// The heap which is used as the global allocator for the system.
 /// It starts off with one basic fixed-size allocator, the `initial_allocator`.
-/// When a more complex heap is created it is set as the default allocator by initializing the `allocator` field.
+/// When a more complex heap is created and set as the `DEFAULT_ALLOCATOR`, it is used instead.
 pub struct Heap {
     initial_allocator: MutexIrqSafe<block_allocator::FixedSizeBlockAllocator>,
-    allocator: Once<Box<dyn GlobalAlloc + Send + Sync>>,
 }


 impl Heap {
-    /// Returns a heap in which only the empty initial allocator has been created.
+    /// Returns a heap in which only an empty initial allocator has been created.
     pub const fn empty() -> Heap {
         Heap {
             initial_allocator: MutexIrqSafe::new(FixedSizeBlockAllocator::new()),
-            allocator: Once::new(),
         }
     }
-
-    fn set_allocator(&self, allocator: Box<dyn GlobalAlloc + Send + Sync>) {
-        self.allocator.call_once(|| allocator);
-    }
 }

 unsafe impl GlobalAlloc for Heap {

     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        match self.allocator.try() {
+        match DEFAULT_ALLOCATOR.try() {
             Some(allocator) => {
                 allocator.alloc(layout)
             }
@@ -86,7 +91,7 @@ unsafe impl GlobalAlloc for Heap {
             self.initial_allocator.lock().deallocate(ptr, layout);
         }
         else {
-            self.allocator.try()
+            DEFAULT_ALLOCATOR.try()
                 .expect("Ptr passed to dealloc is not within the initial allocator's range, and another allocator has not been set up")
                 .dealloc(ptr, layout);
         }
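Putting the `kernel/heap` changes together, the intended lifecycle looks roughly like the sketch below. `create_multiple_heaps` and the constants are assumptions for illustration; `init_single_heap`, `set_allocator`, and the forwarding behavior come from the diff above.

```rust
/// Hypothetical boot-time wiring (names not in this diff are assumptions).
fn set_up_heaps() -> Result<(), &'static str> {
    const HEAP_START: usize = 0xFFFF_FE00_0000_0000; // placeholder address
    const HEAP_INITIAL_SIZE: usize = 256 * 4096;     // placeholder size

    // 1. Early boot: only the fixed-size initial allocator exists.
    heap::init_single_heap(HEAP_START, HEAP_INITIAL_SIZE);

    // 2. After basic system initialization, build the real multiple heaps
    //    and install them. `set_allocator` is now just a
    //    `DEFAULT_ALLOCATOR.call_once(..)`, so the switch happens at most once.
    let multiple_heaps = create_multiple_heaps()?; // assumed constructor
    heap::set_allocator(multiple_heaps);

    // 3. From here on, `GLOBAL_ALLOCATOR`'s `alloc` forwards to
    //    `DEFAULT_ALLOCATOR`, while `dealloc` routes by pointer range back
    //    to the initial allocator when needed.
    Ok(())
}
```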
2 changes: 1 addition & 1 deletion kernel/libtest/src/lib.rs
@@ -69,7 +69,7 @@ pub fn stop_counting_reference_cycles(counter: Counter) -> Result<u64, &'static
     Ok(count)
 }

-const THRESHOLD_ERROR_RATIO: u64 = 1;
+pub const THRESHOLD_ERROR_RATIO: u64 = 1;

 /// Measures the overhead of using the PMU reference cycles counter.
 /// Calls `timing_overhead_inner` multiple times and averages the value.
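Making `THRESHOLD_ERROR_RATIO` public is what lets `overhead_of_accessing_multiple_heaps` in `test_heap` reuse libtest's deviation check. Spelled out with the value defined here (a small worked example, not library code):

```rust
fn deviation_threshold_example() {
    const THRESHOLD_ERROR_RATIO: u64 = 1; // as defined above
    let mean: u64 = 100; // example mean overhead, in hpet ticks
    // err = mean * (10 + ratio) / 10, i.e. 1.1x the mean for ratio = 1.
    let err = (mean * 10 + mean * THRESHOLD_ERROR_RATIO) / 10;
    assert_eq!(err, 110);
    // The caller warns when max - mean > err or mean - min > err.
}
```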
4 changes: 0 additions & 4 deletions kernel/multiple_heaps/Cargo.toml
@@ -40,7 +40,3 @@ path = "../heap"
 [dependencies.hashbrown]
 version = "0.1.8"
 features = ["nightly"]
-
-[features]
-unsafe_large_allocations = []
-unsafe_heap = []