use core::{
    alloc::Layout,
    ptr::{self, NonNull},
    sync::atomic::{AtomicPtr, Ordering},
};

/// The set of free memory blocks.
///
/// Blocks that have been deallocated are linked together in a free list, with
/// the first word of each such block used as a pointer to the next one.
/// Blocks that have never been handed out are carved from a separate,
/// bump-allocated uninitialized region.
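///
/// # Examples
///
/// A minimal usage sketch; the address, block size, and capacity below are
/// placeholders for a real, exclusively owned memory region:
///
/// ```ignore
/// // 8 blocks of 64 bytes each, covering 0x2000_0000..0x2000_0200.
/// static POOL: Pool = Pool::new(0x2000_0000, 64, 8);
///
/// let block = POOL.alloc().expect("pool exhausted");
/// // ... use up to `POOL.size()` bytes at `block` ...
/// unsafe { POOL.dealloc(block) };
/// ```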
pub struct Pool {
    /// Block size. Does not change at run time.
    size: usize,
    /// Address of the byte past the last block. Does not change at run time.
    edge: *mut u8,
    /// Free list of deallocated blocks, linked through their first words.
    free: AtomicPtr<u8>,
    /// Pointer growing from the starting address until it reaches the `edge`.
    uninit: AtomicPtr<u8>,
}

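// SAFETY: `size` and `edge` are never mutated after construction, and all
// mutable state (`free` and `uninit`) is accessed only through atomic
// operations, so a `Pool` can be shared between threads of execution.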
unsafe impl Sync for Pool {}

impl Pool {
    /// Creates a new `Pool` of `capacity` blocks of `size` bytes each,
    /// starting at `address`.
    pub const fn new(address: usize, size: usize, capacity: usize) -> Self {
        Self {
            size,
            edge: (address + size * capacity) as *mut u8,
            free: AtomicPtr::new(ptr::null_mut()),
            uninit: AtomicPtr::new(address as *mut u8),
        }
    }

    /// Returns the block size.
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }

    /// Allocates one block of memory.
    ///
    /// If this method returns `Some(addr)`, then `addr` is a non-null address
    /// pointing to the block. If this method returns `None`, then the pool is
    /// exhausted.
    ///
    /// This operation is lock-free and has *O(1)* time complexity.
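    ///
    /// # Examples
    ///
    /// A sketch of handling exhaustion; the `pool` binding is assumed to be a
    /// `Pool` set up elsewhere:
    ///
    /// ```ignore
    /// match pool.alloc() {
    ///     Some(block) => {
    ///         // The block is ours until `dealloc` is called on it.
    ///     }
    ///     None => {
    ///         // Both the free list and the uninitialized region are empty.
    ///     }
    /// }
    /// ```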
    pub fn alloc(&self) -> Option<NonNull<u8>> {
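        // Try to reuse a block from the free list first; only if it is empty,
        // carve a new block out of the uninitialized region.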
        unsafe { self.alloc_free().or_else(|| self.alloc_uninit()) }
    }

    /// Deallocates the block referenced by `ptr`.
    ///
    /// This operation is lock-free and has *O(1)* time complexity.
    ///
    /// # Safety
    ///
    /// * `ptr` must point to a block previously allocated by
    ///   [`alloc`](Pool::alloc).
    /// * `ptr` must not be used after deallocation.
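    ///
    /// # Examples
    ///
    /// A sketch of the required pairing with [`alloc`](Pool::alloc); the
    /// `pool` binding is assumed to be set up elsewhere:
    ///
    /// ```ignore
    /// if let Some(block) = pool.alloc() {
    ///     // ... the block may be used here ...
    ///     // SAFETY: `block` came from this pool and is not used afterwards.
    ///     unsafe { pool.dealloc(block) };
    /// }
    /// ```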
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn dealloc(&self, ptr: NonNull<u8>) {
        loop {
            // Store the current head of the free list in the first word of the
            // block being deallocated.
            let curr = self.free.load(Ordering::Acquire);
            ptr::write(ptr.as_ptr() as *mut *mut u8, curr);
            // Try to publish the block as the new head of the free list.
            if self
                .free
                .compare_exchange_weak(curr, ptr.as_ptr(), Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                break;
            }
        }
    }

    /// Pops a block from the free list, returning `None` if the list is empty.
    #[allow(clippy::cast_ptr_alignment)]
    unsafe fn alloc_free(&self) -> Option<NonNull<u8>> {
        loop {
            let curr = self.free.load(Ordering::Acquire);
            if curr.is_null() {
                break None;
            }
            // The first word of a free block holds the pointer to the next
            // free block.
            let next = ptr::read(curr as *const *mut u8);
            if self
                .free
                .compare_exchange_weak(curr, next, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                break Some(NonNull::new_unchecked(curr));
            }
        }
    }

    /// Carves a new block out of the uninitialized region, returning `None` if
    /// the region is exhausted.
    unsafe fn alloc_uninit(&self) -> Option<NonNull<u8>> {
        loop {
            let curr = self.uninit.load(Ordering::Relaxed);
            if curr == self.edge {
                break None;
            }
            // Bump the pointer by one block size.
            let next = curr.add(self.size);
            if self
                .uninit
                .compare_exchange_weak(curr, next, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                break Some(NonNull::new_unchecked(curr));
            }
        }
    }
}

/// A type whose values can be checked for whether they fit a [`Pool`].
pub trait Fits: Copy {
    /// Returns `true` if the value fits the `pool`.
    fn fits(self, pool: &Pool) -> bool;
}

impl<'a> Fits for &'a Layout {
    /// A layout fits a pool if the requested size does not exceed the pool's
    /// block size.
    #[inline]
    fn fits(self, pool: &Pool) -> bool {
        self.size() <= pool.size
    }
}

impl Fits for NonNull<u8> {
    /// A pointer fits a pool if it lies below the pool's upper boundary,
    /// `edge`.
    #[inline]
    fn fits(self, pool: &Pool) -> bool {
        self.as_ptr() < pool.edge
    }
}