obkrnl/malloc/mod.rs

use self::vm::VmHeap;
use crate::context::current_thread;
use crate::lock::Mutex;
use alloc::boxed::Box;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::{RefCell, UnsafeCell};
use core::hint::unreachable_unchecked;
use core::ptr::{NonNull, null_mut};
use talc::{ClaimOnOom, Span, Talc};

mod vm;

/// Implementation of [`GlobalAlloc`] for objects belonging to kernel space.
///
/// This allocator has 2 stages. The first stage allocates memory from a static buffer (AKA an
/// arena) and is primarily used for bootstrapping the kernel. The second stage is activated once
/// the required subsystems have been initialized.
///
/// The first stage is **not** thread safe, so stage 2 must be activated before starting a new CPU.
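///
/// # Examples
///
/// A rough sketch of how stage 1 might be set up. The arena size and the `STAGE1_ARENA` and
/// `KERNEL_HEAP` names are illustrative, not taken from the actual kernel:
///
/// ```ignore
/// static mut STAGE1_ARENA: [u8; 1024 * 1024] = [0u8; 1024 * 1024];
///
/// // SAFETY: STAGE1_ARENA is used for nothing else.
/// #[global_allocator]
/// static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut STAGE1_ARENA) };
/// ```
///
/// See [`KernelHeap::activate_stage2()`] for the switch to stage 2.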
pub struct KernelHeap {
    stage: UnsafeCell<Stage>,
    primitive_ptr: *const u8,
    primitive_end: *const u8,
}

impl KernelHeap {
    /// # Safety
    /// The specified memory must be valid for reads and writes and it must be exclusively available
    /// to [`KernelHeap`].
    pub const unsafe fn new<const L: usize>(primitive: *mut [u8; L]) -> Self {
        let primitive_ptr = primitive.cast();

        // SAFETY: The safety requirements of our function satisfy the safety requirement of
        // ClaimOnOom::new().
        let primitive = unsafe { Talc::new(ClaimOnOom::new(Span::from_array(primitive))) };

        Self {
            stage: UnsafeCell::new(Stage::One(RefCell::new(primitive))),
            primitive_ptr,
            // SAFETY: L is the length of the array behind primitive_ptr, so the resulting pointer
            // is valid.
            primitive_end: unsafe { primitive_ptr.add(L) },
        }
    }

    /// # Safety
    /// This must be called by the main CPU and it can only be called once.
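    ///
    /// # Examples
    ///
    /// A rough sketch of where this call might sit in the boot sequence; `KERNEL_HEAP` and
    /// `start_secondary_cpus` are illustrative names, not part of this module:
    ///
    /// ```ignore
    /// // On the main CPU, once the subsystems required by VmHeap are up.
    /// unsafe { KERNEL_HEAP.activate_stage2() };
    ///
    /// // Only now is it safe to bring up additional CPUs.
    /// start_secondary_cpus();
    /// ```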
    pub unsafe fn activate_stage2(&self) {
        // Set up the VM heap using the primitive heap.
        let vm = Box::new(VmHeap::new());

        // What we are doing here is highly unsafe. Do not edit the code after this unless you know
        // what you are doing!
        let stage = self.stage.get();
        let primitive = match unsafe { stage.read() } {
            Stage::One(v) => Mutex::new(v.into_inner()),
            // SAFETY: The safety requirements of our function make this unreachable.
            Stage::Two(_, _) => unsafe { unreachable_unchecked() },
        };

        // Switch to stage 2 WITHOUT dropping the value contained in Stage::One. What we are doing
        // here is moving the value from Stage::One into Stage::Two.
        unsafe { stage.write(Stage::Two(vm, primitive)) };
    }
}

unsafe impl GlobalAlloc for KernelHeap {
    #[inline(never)]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // If stage 2 has not been activated yet, this function is not allowed to access the CPU
        // context because it can be called before the context has been activated.

        // SAFETY: GlobalAlloc::alloc requires layout to have a non-zero size.
        match unsafe { &*self.stage.get() } {
            Stage::One(primitive) => unsafe {
                primitive
                    .borrow_mut()
                    .malloc(layout)
                    .map(|v| v.as_ptr())
                    .unwrap_or(null_mut())
            },
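            // NOTE: In stage 2 the VM heap is the default allocator. A non-zero heap guard on the
            // current thread routes the allocation to the primitive heap instead, presumably so
            // the VM heap itself can allocate without recursing into its own allocation path.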
            Stage::Two(vm, primitive) => match current_thread().active_heap_guard() {
                0 => unsafe { vm.alloc(layout) },
                _ => unsafe {
                    primitive
                        .lock()
                        .malloc(layout)
                        .map(|v| v.as_ptr())
                        .unwrap_or(null_mut())
                },
            },
        }
    }

    #[inline(never)]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // If stage 2 has not been activated yet, this function is not allowed to access the CPU
        // context because it can be called before the context has been activated.

        // SAFETY: GlobalAlloc::dealloc requires ptr to be the same pointer that was returned from
        // our GlobalAlloc::alloc and layout to be the same layout that was passed to it.
        match unsafe { &*self.stage.get() } {
            Stage::One(primitive) => unsafe {
                primitive
                    .borrow_mut()
                    .free(NonNull::new_unchecked(ptr), layout)
            },
            Stage::Two(vm, primitive) => {
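                // Pointers inside [primitive_ptr, primitive_end) were handed out by the primitive
                // heap (either during stage 1 or while a heap guard was active), so they must go
                // back to the primitive heap even after stage 2 has been activated.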
                if ptr.cast_const() >= self.primitive_ptr && ptr.cast_const() < self.primitive_end {
                    unsafe { primitive.lock().free(NonNull::new_unchecked(ptr), layout) }
                } else {
                    // SAFETY: ptr is not owned by the primitive heap, so by the requirements of
                    // GlobalAlloc::dealloc it must be owned by the VM heap.
                    unsafe { vm.dealloc(ptr, layout) };
                }
            }
        }
    }
}

// We impose a restriction on the user to activate stage 2 before going multi-threaded.
unsafe impl Send for KernelHeap {}
unsafe impl Sync for KernelHeap {}

/// Stage of [`KernelHeap`].
enum Stage {
    /// Stage 1: a single-threaded allocator backed by the static arena.
    One(RefCell<Talc<ClaimOnOom>>),
    /// Stage 2: the VM heap plus a mutex-protected view of the stage 1 allocator.
    Two(Box<VmHeap>, Mutex<Talc<ClaimOnOom>>),
}