obkrnl/malloc/mod.rs

use self::vm::VmHeap;
use crate::context::current_thread;
use crate::lock::Mutex;
use alloc::boxed::Box;
use core::alloc::{GlobalAlloc, Layout};
use core::cell::{RefCell, UnsafeCell};
use core::hint::unreachable_unchecked;
use core::ptr::{NonNull, null_mut};
use talc::{ClaimOnOom, Span, Talc};

mod vm;

/// Implementation of [`GlobalAlloc`] for objects belonging to kernel space.
///
/// This allocator has two stages. The first stage allocates memory from a static buffer (AKA
/// arena) and is primarily used for bootstrapping the kernel. The second stage is activated once
/// the required subsystems have been initialized.
///
/// The first stage is **not** thread safe, so stage 2 must be activated before starting a new CPU.
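///
/// # Examples
///
/// A minimal sketch of how this heap might be wired up. The arena size and the `ARENA`, `HEAP`,
/// and `setup_main_cpu` names below are illustrative only, not part of this module:
///
/// ```ignore
/// // Backing buffer for stage 1.
/// static mut ARENA: [u8; 1024 * 1024] = [0; 1024 * 1024];
///
/// // SAFETY: ARENA is valid for reads and writes and is used exclusively by HEAP.
/// #[global_allocator]
/// static HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut ARENA) };
///
/// fn setup_main_cpu() {
///     // Once the VM subsystem is ready and before any other CPU is started.
///     // SAFETY: This is called only once and only on the main CPU.
///     unsafe { HEAP.activate_stage2() };
/// }
/// ```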
pub struct KernelHeap {
    stage: UnsafeCell<Stage>,
    primitive_ptr: *const u8,
    primitive_end: *const u8,
}

impl KernelHeap {
    /// # Safety
    /// The specified memory must be valid for reads and writes and it must be exclusively
    /// available to [`KernelHeap`].
    pub const unsafe fn new<const L: usize>(primitive: *mut [u8; L]) -> Self {
        let primitive_ptr = primitive.cast();

        // SAFETY: The safety requirements of our function satisfy the safety requirements of
        // ClaimOnOom::new().
        let primitive = unsafe { Talc::new(ClaimOnOom::new(Span::from_array(primitive))) };

        Self {
            stage: UnsafeCell::new(Stage::One(RefCell::new(primitive))),
            primitive_ptr,
            // SAFETY: L is the length of the buffer behind primitive_ptr so the resulting
            // one-past-the-end pointer is valid.
            primitive_end: unsafe { primitive_ptr.add(L) },
        }
    }

    /// # Safety
    /// This must be called by the main CPU and can be called only once.
    pub unsafe fn activate_stage2(&self) {
        // Set up the VM heap using the primitive heap.
        let vm = Box::new(VmHeap::new());

        // What we are doing here is highly unsafe. Do not edit the code after this unless you know
        // what you are doing!
        let stage = self.stage.get();
        let primitive = match unsafe { stage.read() } {
            Stage::One(v) => Mutex::new(v.into_inner()),
            // SAFETY: The safety requirements of our function make this unreachable.
            Stage::Two(_, _) => unsafe { unreachable_unchecked() },
        };

        // Switch to stage 2 WITHOUT dropping the value contained in Stage::One. What we do here is
        // move the value from Stage::One into Stage::Two.
        unsafe { stage.write(Stage::Two(vm, primitive)) };
    }
}

unsafe impl GlobalAlloc for KernelHeap {
    #[inline(never)]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // If stage 2 has not been activated yet then this function is not allowed to access the
        // CPU context because it can be called before the context has been activated.

        // SAFETY: GlobalAlloc::alloc requires layout to be non-zero.
        match unsafe { &*self.stage.get() } {
            Stage::One(primitive) => unsafe {
                primitive
                    .borrow_mut()
                    .malloc(layout)
                    .map_or(null_mut(), |v| v.as_ptr())
            },
            // Use the VM heap unless the current thread has an active heap guard, in which case
            // fall back to the primitive heap.
            Stage::Two(vm, primitive) => match current_thread().active_heap_guard() {
                0 => unsafe { vm.alloc(layout) },
                _ => unsafe {
                    primitive
                        .lock()
                        .malloc(layout)
                        .map_or(null_mut(), |v| v.as_ptr())
                },
            },
        }
    }

    #[inline(never)]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // If stage 2 has not been activated yet then this function is not allowed to access the
        // CPU context because it can be called before the context has been activated.

        // SAFETY: GlobalAlloc::dealloc requires ptr to be the same one returned from our
        // GlobalAlloc::alloc and layout to be the same one that was passed to it.
        match unsafe { &*self.stage.get() } {
            Stage::One(primitive) => unsafe {
                primitive
                    .borrow_mut()
                    .free(NonNull::new_unchecked(ptr), layout)
            },
            Stage::Two(vm, primitive) => {
                // Pointers inside the static arena were allocated by the primitive heap.
                if ptr.cast_const() >= self.primitive_ptr && ptr.cast_const() < self.primitive_end {
                    unsafe { primitive.lock().free(NonNull::new_unchecked(ptr), layout) }
                } else {
                    // SAFETY: ptr is not owned by the primitive heap, so by the requirements of
                    // GlobalAlloc::dealloc it must be owned by the VM heap.
                    unsafe { vm.dealloc(ptr, layout) };
                }
            }
        }
    }
}

// We impose a restriction on the user to activate stage 2 before going multi-threaded.
unsafe impl Send for KernelHeap {}
unsafe impl Sync for KernelHeap {}

/// Stage of [`KernelHeap`].
enum Stage {
    One(RefCell<Talc<ClaimOnOom>>),
    Two(Box<VmHeap>, Mutex<Talc<ClaimOnOom>>),
}