cargo fmt, symlink project metadata
SFBdragon committed Apr 13, 2024
1 parent eaabfd1 commit c95e1a7
Showing 8 changed files with 43 additions and 47 deletions.
1 change: 1 addition & 0 deletions talc/LICENSE.md
1 change: 1 addition & 0 deletions talc/README.md
1 change: 1 addition & 0 deletions talc/README_WASM.md
9 changes: 4 additions & 5 deletions talc/src/lib.rs
@@ -13,16 +13,15 @@
 #![cfg_attr(feature = "nightly_api", feature(slice_ptr_len))]
 #![cfg_attr(feature = "nightly_api", feature(const_slice_ptr_len))]
 
-mod talc;
-mod span;
 mod oom_handler;
 mod ptr_utils;
+mod span;
+mod talc;
 
-#[cfg(feature = "lock_api")]
-mod talck;
 #[cfg(feature = "lock_api")]
 pub mod locking;
-
+#[cfg(feature = "lock_api")]
+mod talck;
 
 pub use oom_handler::{ClaimOnOom, ErrOnOom, OomHandler};
 pub use span::Span;
12 changes: 6 additions & 6 deletions talc/src/locking.rs
@@ -2,17 +2,17 @@
 //! Use of the `spin` crate's mutex with [`Talck`](crate::Talc) is a good default.
 
 /// #### WARNING: [`AssumeUnlockable`] may cause undefined behaviour without `unsafe` code!
-/// 
+///
 /// A dummy [`RawMutex`](lock_api::RawMutex) implementation to skip synchronization on single threaded systems.
 ///
 /// # Safety
-/// [`AssumeUnlockable`] is highly unsafe and may cause undefined behaviour if multiple 
+/// [`AssumeUnlockable`] is highly unsafe and may cause undefined behaviour if multiple
 /// threads enter a critical section it guards, even without explicit unsafe code.
-/// 
-/// Note that uncontended spin locks are cheap. Usage is only recommended on 
+///
+/// Note that uncontended spin locks are cheap. Usage is only recommended on
 /// platforms that don't have atomics or are exclusively single threaded.
-/// 
-/// Through no fault of its own, `lock_api`'s API does not allow for safe 
+///
+/// Through no fault of its own, `lock_api`'s API does not allow for safe
 /// encapsulation of this functionality. This is a hack for backwards compatibility.
 pub struct AssumeUnlockable;
 
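Since the cleanup above only touches `AssumeUnlockable`'s documentation, a usage sketch may help put it in context. This is not part of the commit: it assumes `AssumeUnlockable` implements `lock_api::RawMutex` (the impl sits outside this hunk) and leans on talc's published `Talc`, `Talck`, `ClaimOnOom`, and `Span::from_const_array` API.

// Sketch: a single-threaded global allocator built on AssumeUnlockable.
// Sound *only* if no two threads can ever be inside the allocator at once.
use talc::{locking::AssumeUnlockable, ClaimOnOom, Span, Talc, Talck};

static mut ARENA: [u8; 10000] = [0; 10000];

#[global_allocator]
static ALLOCATOR: Talck<AssumeUnlockable, ClaimOnOom> = Talc::new(unsafe {
    // SAFETY: ARENA is claimed exactly once, by this allocator.
    ClaimOnOom::new(Span::from_const_array(core::ptr::addr_of!(ARENA)))
})
.lock();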
9 changes: 4 additions & 5 deletions talc/src/talc.rs
@@ -504,24 +504,23 @@ impl<O: OomHandler> Talc<O> {
         old_layout: Layout,
         new_size: usize,
     ) -> Result<NonNull<u8>, ()> {
-
         match self.grow_in_place(ptr, old_layout, new_size) {
             Err(_) => {
                 // grow in-place failed; reallocate the slow way
                 let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
                 let allocation = self.malloc(new_layout)?;
                 allocation.as_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
                 self.free(ptr, old_layout);
- 
+
                 Ok(allocation)
             }
             res => res,
         }
     }
 
     /// Attempt to grow a previously allocated/reallocated region of memory to `new_size`.
-    /// 
-    /// Returns `Err` if reallocation could not occur in-place. 
+    ///
+    /// Returns `Err` if reallocation could not occur in-place.
     /// Ownership of the memory remains with the caller.
     /// # Safety
     /// `ptr` must have been previously allocated or reallocated given `layout`.
@@ -746,7 +745,7 @@ impl<O: OomHandler> Talc<O> {
         // register the free memory
         let chunk_base = base.wrapping_add(TAG_SIZE);
         self.register_gap(chunk_base, acme);
- 
+
         self.scan_for_errors();
 
         #[cfg(feature = "counters")]
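Beyond the doc-comment whitespace, `grow` above is only reformatted: it still tries `grow_in_place` first and falls back to allocate-copy-free on `Err`. A rough sketch of exercising that path through talc's public unsafe API (signatures as shown in this diff; the snippet itself is not part of the commit):

use core::alloc::Layout;
use talc::{ErrOnOom, Talc};

fn main() {
    let mut arena = [0u8; 4096];
    let mut talc = Talc::new(ErrOnOom);
    unsafe {
        // Hand the arena to the allocator.
        talc.claim(arena.as_mut_slice().into()).unwrap();

        let old_layout = Layout::from_size_align(16, 8).unwrap();
        let ptr = talc.malloc(old_layout).unwrap();

        // Tries grow_in_place first; on Err, mallocs a new region,
        // copies the old bytes across, and frees the old allocation.
        let grown = talc.grow(ptr, old_layout, 64).unwrap();

        talc.free(grown, Layout::from_size_align(64, 8).unwrap());
    }
}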
35 changes: 17 additions & 18 deletions talc/src/talc/counters.rs
@@ -10,7 +10,7 @@ pub struct Counters {
     /// Sum of active allocations' layouts' size.
     pub allocated_bytes: usize,
     /// Sum of all allocations' layouts' maximum size.
-    /// 
+    ///
     /// In-place reallocations's unchanged bytes are not recounted.
     pub total_allocated_bytes: u64,
 
@@ -117,19 +117,24 @@
 
 impl core::fmt::Display for Counters {
     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        f.write_fmt(format_args!(r#"Stat                 | Running Total       | Accumulative Total
+        f.write_fmt(format_args!(
+            r#"Stat                 | Running Total       | Accumulative Total
 ---------------------|---------------------|--------------------
 # of Allocations     | {:>19} | {:>19}
 # of Allocated Bytes | {:>19} | {:>19}
 # of Available Bytes | {:>19} | N/A
 # of Claimed Bytes   | {:>19} | {:>19}
 # of Heaps           | {:>19} | {:>19}
-# of Fragments       | {:>19} | N/A"#,
-            self.allocation_count, self.total_allocation_count,
-            self.allocated_bytes, self.total_allocated_bytes,
+# of Fragments       | {:>19} | N/A"#,
+            self.allocation_count,
+            self.total_allocation_count,
+            self.allocated_bytes,
+            self.total_allocated_bytes,
             self.available_bytes,
-            self.claimed_bytes, self.total_claimed_bytes,
-            self.heap_count, self.total_heap_count,
+            self.claimed_bytes,
+            self.total_claimed_bytes,
+            self.heap_count,
+            self.total_heap_count,
             self.fragment_count
         ))
     }
@@ -147,7 +152,7 @@ mod tests {
 
     use ptr_utils::{WORD_BITS, WORD_SIZE};
 
-    use crate::{*, talc::TAG_SIZE};
+    use crate::{talc::TAG_SIZE, *};
 
     #[test]
     fn test_claim_alloc_free_truncate() {
@@ -157,9 +162,7 @@
 
         let low = 99;
         let high = 10001;
-        let heap1 = unsafe {
-            talc.claim(arena.get_mut(low..high).unwrap().into()).unwrap()
-        };
+        let heap1 = unsafe { talc.claim(arena.get_mut(low..high).unwrap().into()).unwrap() };
 
         let pre_alloc_claimed_bytes = talc.get_counters().claimed_bytes;
         assert!(talc.get_counters().claimed_bytes == heap1.size());
@@ -182,9 +185,7 @@
         assert!(talc.get_counters().overhead_bytes() <= TAG_SIZE + WORD_SIZE * WORD_BITS * 2 + 64);
 
         let alloc_layout = Layout::new::<[u128; 3]>();
-        let alloc = unsafe {
-            talc.malloc(alloc_layout).unwrap()
-        };
+        let alloc = unsafe { talc.malloc(alloc_layout).unwrap() };
 
         assert!(talc.get_counters().claimed_bytes == pre_alloc_claimed_bytes);
         assert!(talc.get_counters().available_bytes < pre_alloc_avl_bytes - alloc_layout.size());
@@ -197,7 +198,7 @@
         assert!(matches!(talc.get_counters().fragment_count, 1..=2));
 
         assert!(talc.get_counters().overhead_bytes() >= 2 * TAG_SIZE);
- 
+
         unsafe {
             talc.free(alloc, alloc_layout);
         }
@@ -211,9 +212,7 @@
         assert!(talc.get_counters().total_allocation_count == 1);
         assert!(talc.get_counters().fragment_count == 1);
 
-        let heap1 = unsafe {
-            talc.truncate(heap1, talc.get_allocated_span(heap1))
-        };
+        let heap1 = unsafe { talc.truncate(heap1, talc.get_allocated_span(heap1)) };
 
         assert!(heap1.size() <= TAG_SIZE + WORD_SIZE * WORD_BITS * 2 + 64);
 
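For reference, the table rendered by the `Display` impl above can be printed straight from `get_counters()` (the accessor used throughout these tests). A minimal sketch, assuming the `counters` crate feature is enabled; not part of the commit:

use talc::{ErrOnOom, Talc};

fn main() {
    let mut arena = [0u8; 4096];
    let mut talc = Talc::new(ErrOnOom);
    unsafe { talc.claim(arena.as_mut_slice().into()).unwrap() };
    // Prints the "Stat | Running Total | Accumulative Total" table.
    println!("{}", talc.get_counters());
}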
22 changes: 9 additions & 13 deletions talc/src/talck.rs
@@ -5,14 +5,14 @@ use crate::{talc::Talc, OomHandler};
 use core::{
     alloc::{GlobalAlloc, Layout},
     cmp::Ordering,
-    ptr::{NonNull, null_mut},
+    ptr::{null_mut, NonNull},
 };
 
 #[cfg(feature = "allocator")]
-use core::alloc::{Allocator, AllocError};
+use core::alloc::{AllocError, Allocator};
 
 #[cfg(all(feature = "allocator-api2", not(feature = "allocator")))]
-use allocator_api2::alloc::{Allocator, AllocError};
+use allocator_api2::alloc::{AllocError, Allocator};
 
 #[cfg(any(feature = "allocator", feature = "allocator-api2"))]
 pub(crate) fn is_aligned_to(ptr: *mut u8, align: usize) -> bool {
@@ -31,15 +31,13 @@ const RELEASE_LOCK_ON_REALLOC_LIMIT: usize = 0x10000;
 /// ```
 #[derive(Debug)]
 pub struct Talck<R: lock_api::RawMutex, O: OomHandler> {
-    mutex: lock_api::Mutex<R, Talc<O>>
+    mutex: lock_api::Mutex<R, Talc<O>>,
 }
 
 impl<R: lock_api::RawMutex, O: OomHandler> Talck<R, O> {
     /// Create a new `Talck`.
     pub const fn new(talc: Talc<O>) -> Self {
-        Self {
-            mutex: lock_api::Mutex::new(talc),
-        }
+        Self { mutex: lock_api::Mutex::new(talc) }
     }
 
     /// Lock the mutex and access the inner `Talc`.
@@ -87,7 +85,7 @@ unsafe impl<R: lock_api::RawMutex, O: OomHandler> GlobalAlloc for Talck<R, O> {
             Ok(ptr) => ptr,
             Err(_) => return null_mut(),
         };
- 
+
         if old_layout.size() > RELEASE_LOCK_ON_REALLOC_LIMIT {
            drop(lock);
            allocation.as_ptr().copy_from_nonoverlapping(ptr, old_layout.size());
@@ -113,9 +111,7 @@
 /// Convert a nonnull and length to a nonnull slice.
 #[cfg(any(feature = "allocator", feature = "allocator-api2"))]
 fn nonnull_slice_from_raw_parts(ptr: NonNull<u8>, len: usize) -> NonNull<[u8]> {
-    unsafe {
-        NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len))
-    }
+    unsafe { NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(ptr.as_ptr(), len)) }
 }
 
 #[cfg(any(feature = "allocator", feature = "allocator-api2"))]
@@ -126,8 +122,8 @@ unsafe impl<R: lock_api::RawMutex, O: OomHandler> Allocator for Talck<R, O> {
         }
 
         unsafe { self.lock().malloc(layout) }
-        .map(|nn| nonnull_slice_from_raw_parts(nn, layout.size()))
-        .map_err(|_| AllocError)
+            .map(|nn| nonnull_slice_from_raw_parts(nn, layout.size()))
+            .map_err(|_| AllocError)
     }
 
     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
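The `.map`/`.map_err` chain above is the `Allocator::allocate` path this commit re-indents. For orientation, a hedged sketch of driving it through the allocator API — this assumes a nightly toolchain, talc's `allocator` feature, and the `spin` crate, and is not part of the commit:

#![feature(allocator_api)]
use talc::{ErrOnOom, Talc, Talck};

fn main() {
    let mut arena = [0u8; 4096];
    let talck: Talck<spin::Mutex<()>, ErrOnOom> = Talc::new(ErrOnOom).lock();
    unsafe { talck.lock().claim(arena.as_mut_slice().into()).unwrap() };

    // Vec allocates through Talck's Allocator impl (allocate/deallocate).
    let mut v = Vec::new_in(&talck);
    v.extend_from_slice(b"hello");
    assert_eq!(&*v, b"hello");
}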
