// obkrnl/malloc/vm.rs

use crate::config::PAGE_SIZE;
use crate::context::{CpuLocal, current_thread, current_uma};
use crate::uma::{Alloc, UmaFlags, UmaZone};
use alloc::string::ToString;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::alloc::Layout;
use core::cell::RefCell;
use core::num::NonZero;
/// Kernel heap that allocates memory from a virtual memory management system. This struct is a
/// merge of the `malloc_type` and `malloc_type_internal` structures.
pub struct VmHeap {
    // Zone tables indexed first by log2 of the allocation alignment, then by
    // `size >> KMEM_ZSHIFT` of the rounded allocation size (see `alloc()`).
    zones: [Vec<Arc<UmaZone>>; (usize::BITS - 1) as usize], // kmemsize + kmemzones
    // Per-CPU statistics; `RefCell` gives interior mutability since each CPU owns its own copy.
    stats: CpuLocal<RefCell<Stats>>,                        // mti_stats
}
impl VmHeap {
    /// log2 of `KMEM_ZBASE`; a rounded size maps to its zone table entry via
    /// `size >> KMEM_ZSHIFT`.
    const KMEM_ZSHIFT: usize = 4;
    /// Size of the smallest zone and the step between entries in each zone table.
    const KMEM_ZBASE: usize = 16;
    /// Mask used by `alloc()` to round a requested size up to a multiple of `KMEM_ZBASE`.
    const KMEM_ZMASK: usize = Self::KMEM_ZBASE - 1;
    /// Number of `KMEM_ZBASE`-sized steps in one page; used to pre-size each zone table.
    const KMEM_ZSIZE: usize = PAGE_SIZE.get() >> Self::KMEM_ZSHIFT;

    /// Constructs the heap by creating, for each possible allocation alignment, a lookup table
    /// that maps a rounded allocation size to the UMA zone servicing it.
    ///
    /// See `kmeminit` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x1A4B80|
    pub fn new() -> Self {
        // The maximum alignment that Layout allows is one bit below the most significant bit of
        // isize (e.g. 0x4000000000000000 on a 64-bit system). So we can use
        // "size_of::<usize>() * 8 - 1" to get the array size covering all possible alignments.
        let uma = current_uma().unwrap();
        let zones = core::array::from_fn(|align| {
            // One extra slot because table indices run from 0 through KMEM_ZSIZE inclusive after
            // rounding (see alloc()).
            let mut zones = Vec::with_capacity(Self::KMEM_ZSIZE + 1);
            let mut last = 0;
            // Convert the array index into the actual alignment (1 << index). checked_shl cannot
            // fail here because the index is always below usize::BITS - 1.
            let align = align
                .try_into()
                .ok()
                .and_then(|align| 1usize.checked_shl(align))
                .unwrap();

            // Create one zone per power-of-two size from KMEM_ZBASE up to the page size.
            for i in Self::KMEM_ZSHIFT.. {
                // Stop if size larger than page size.
                let size = NonZero::new(1usize << i).unwrap();

                if size > PAGE_SIZE {
                    break;
                }

                // Create zone.
                // NOTE(review): into_owned() is invoked once per created zone; presumably
                // current_uma() returns a cheaply-ownable borrow — confirm against its definition.
                let zone = Arc::new(uma.into_owned().create_zone(
                    size.to_string(),
                    size,
                    Some(align - 1),
                    UmaFlags::Malloc,
                ));

                // Point every KMEM_ZBASE-sized step not yet covered at this zone, so a rounded
                // request maps directly to the smallest zone that fits it.
                while last <= size.get() {
                    zones.push(zone.clone());
                    last += Self::KMEM_ZBASE;
                }
            }

            zones
        });

        Self {
            zones,
            stats: CpuLocal::new(|_| RefCell::default()),
        }
    }

    /// Returns null on failure.
    ///
    /// See `malloc` on the Orbis for a reference.
    ///
    /// # Panics
    /// Panics if the current thread is not allowed to sleep, since this implementation always
    /// behaves as if `M_WAITOK` was specified.
    ///
    /// # Safety
    /// `layout` must be nonzero.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x1A4220|
    pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Our implementation implies M_WAITOK, which requires a sleepable context.
        let td = current_thread();

        if !td.can_sleep() {
            panic!("heap allocation in a non-sleeping context is not supported");
        }

        // Determine how to allocate. The guard returned by disable_vm_heap() is held across the
        // whole allocation and dropped at the end of this function (presumably it prevents
        // re-entry into this heap — confirm against its definition).
        let lock = td.disable_vm_heap();
        let size = layout.size();
        let mem = if size <= PAGE_SIZE.get() {
            // Get zone to allocate from. Zone tables are indexed by log2 of the alignment.
            let align = layout.align().trailing_zeros() as usize;
            let size = if (size & Self::KMEM_ZMASK) != 0 {
                // Round the size up to the next multiple of KMEM_ZBASE.
                // TODO: Refactor this for readability.
                (size + Self::KMEM_ZBASE) & !Self::KMEM_ZMASK
            } else {
                size
            };

            // Allocate a memory from UMA zone.
            let zone = &self.zones[align][size >> Self::KMEM_ZSHIFT];
            let mem = zone.alloc(Alloc::Wait | Alloc::Zero);

            // Update stats. Account the zone's full allocation size, not the requested size, and
            // only when the allocation actually succeeded.
            let stats = self.stats.lock();
            let mut stats = stats.borrow_mut();
            let size = if mem.is_null() { 0 } else { zone.size().get() };

            if size != 0 {
                stats.alloc_bytes = stats
                    .alloc_bytes
                    .checked_add(size.try_into().unwrap())
                    .unwrap();
                stats.alloc_count += 1;
            }

            // TODO: How to update mts_size here since our zone table also indexed by alignment?
            mem
        } else {
            // Multi-page allocations are not implemented yet.
            todo!()
        };

        drop(lock);

        mem
    }

    /// Frees a block previously returned by [`Self::alloc()`]. Not implemented yet.
    ///
    /// # Safety
    /// `ptr` must be obtained with [`Self::alloc()`] and `layout` must be the same one that was
    /// passed to that method.
    pub unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
        todo!()
    }
}
/// Implementation of `malloc_type_stats` structure.
///
/// Per-CPU allocation statistics, updated by [`VmHeap::alloc()`].
#[derive(Default)]
struct Stats {
    alloc_bytes: u64, // mts_memalloced: total bytes successfully allocated.
    alloc_count: u64, // mts_numallocs: number of successful allocations.
}