// obkrnl/uma/keg.rs
1use super::arch::small_alloc;
2use super::{Alloc, FreeItem, Slab, SlabHdr, Uma, UmaFlags};
3use crate::config::{PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
4use crate::lock::Mutex;
5use crate::vm::Vm;
6use alloc::collections::vec_deque::VecDeque;
7use alloc::sync::Arc;
8use core::alloc::Layout;
9use core::cmp::{max, min};
10use core::num::NonZero;
11use core::ptr::NonNull;
12use core::sync::atomic::{AtomicU32, Ordering};
13
/// Implementation of `uma_keg` structure.
///
/// A keg is the backing-store side of a UMA zone: it owns the slab layout
/// parameters computed in [`UmaKeg::new`] and the page allocator used to grow
/// the keg. Trailing comments name the corresponding `uma_keg` member on the
/// Orbis kernel.
pub struct UmaKeg<T> {
    vm: Arc<Vm>,                      // VM backing the slab page allocations
    size: NonZero<usize>,             // uk_size: requested item size
    rsize: usize,                     // uk_rsize: real (aligned) item size
    pgoff: usize,                     // uk_pgoff: byte offset of the slab header within a slab
    ppera: usize,                     // uk_ppera: pages per allocation (slab)
    ipers: usize,                     // uk_ipers: items per slab
    alloc: fn(&Vm, Alloc) -> *mut u8, // uk_allocf: page allocator function
    init: Option<fn()>,               // uk_init: per-item initializer (not implemented yet)
    max_pages: usize,                 // uk_maxpages: page limit, 0 = unlimited
    recurse: AtomicU32,               // uk_recurse: re-entrant allocation depth
    flags: UmaFlags,                  // uk_flags
    state: Mutex<KegState<T>>,        // mutable counters and slab lists
}
29
impl<T: FreeItem> UmaKeg<T> {
    /// Constructs a keg, computing the slab layout (`uk_rsize`, `uk_ppera`, `uk_ipers`,
    /// `uk_pgoff`) from the item size, alignment and flags.
    ///
    /// `align` is the actual alignment **minus** one, which means if you want each item to be 8
    /// bytes aligned this value will be 7.
    ///
    /// See `keg_ctor` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13CF40|
    pub(super) fn new(
        vm: Arc<Vm>,
        size: NonZero<usize>,
        align: usize,
        init: Option<fn()>,
        mut flags: UmaFlags,
    ) -> Self {
        // Kernel-internal VM zones are not supported yet.
        if flags.has_any(UmaFlags::Vm) {
            todo!()
        }

        // Zone-level initializer is not supported yet.
        if flags.has_any(UmaFlags::ZInit) {
            todo!()
        }

        // malloc-backed zones must be able to map a virtual address back to its slab.
        if flags.has_any(UmaFlags::Malloc) {
            flags |= UmaFlags::VToSlab;
        }

        // Merge in whatever flags the free-item representation requires.
        flags |= T::flags();

        // Get header layout: SlabHdr<T> followed by a properly aligned T.
        let hdr = Layout::new::<SlabHdr<T>>();
        let (mut hdr, off) = hdr.extend(Layout::new::<T>()).unwrap();

        hdr = hdr.pad_to_align();

        // Get UMA_FRITM_SZ and UMA_FRITMREF_SZ.
        // `free_item` is the per-item bookkeeping size; `available` is what is left on one
        // page after the in-page slab header.
        let free_item = hdr.size() - off;
        let available = PAGE_SIZE.get() - hdr.size();

        // Get uk_rsize, uk_ppera and uk_ipers.
        let (rsize, ppera, ipers) = if flags.has_any(UmaFlags::CacheSpread) {
            // Get uk_rsize: item size rounded up to the requested alignment.
            let rsize = size.get().next_multiple_of(align + 1);
            let align = align + 1;
            let rsize = if (rsize & align) == 0 {
                // TODO: What is this? Presumably bumps the size so consecutive items land on
                // different cache lines — confirm against keg_ctor.
                rsize + align
            } else {
                rsize
            };

            // Get uk_ppera, capped at 128 KiB worth of pages per allocation.
            let pages = (PAGE_SIZE.get() / align * rsize) >> PAGE_SHIFT;
            let ppera = min(pages, (128 * 1024) / PAGE_SIZE);

            // TODO: Why we need to add the differences to the calculation?
            let ipers = (ppera * PAGE_SIZE.get() + (rsize - size.get())) / rsize;

            (rsize, ppera, ipers)
        } else {
            // TODO: Not sure why we need space at least for 2 free item?
            // One item plus its free-item bookkeeping does not fit on a page alongside the
            // slab header, so each slab holds exactly one item.
            if (size.get() + free_item) > available {
                // TODO: Set uk_ppera and uk_rsize.
                if !flags.has_any(UmaFlags::Internal) {
                    // Keep the slab header out of the data pages.
                    flags |= UmaFlags::Offpage;

                    if !flags.has_any(UmaFlags::VToSlab) {
                        // Without VToSlab the slab has to be found via a hash lookup instead.
                        flags |= UmaFlags::Hash;
                    }
                }

                // Get uk_ppera: whole pages needed for one item, rounding up any remainder.
                let mut ppera = size.get() >> PAGE_SHIFT;

                if size.get() > (size.get() & !PAGE_MASK.get()) {
                    ppera += 1;
                }

                (size.get(), ppera, 1)
            } else {
                // Get uk_rsize: at least the smallest supported unit, aligned as requested.
                let rsize = max(size, Uma::SMALLEST_UNIT);
                let rsize = rsize.get().next_multiple_of(align + 1);

                // Get uk_ipers: items (each with a free-item entry) that fit after the
                // in-page header.
                let mut ipers = available / (rsize + free_item);

                // TODO: Verify if this valid for PAGE_SIZE < 0x4000.
                // If keeping the header in-page wastes too much of the page, move it
                // off-page so more items fit per slab.
                if !flags.has_any(UmaFlags::Internal | UmaFlags::CacheOnly)
                    && (available % (rsize + free_item)) >= Uma::MAX_WASTE.get()
                    && (PAGE_SIZE.get() / rsize) > ipers
                {
                    ipers = PAGE_SIZE.get() / rsize;

                    if flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Offpage;
                    } else {
                        flags |= UmaFlags::Offpage | UmaFlags::Hash;
                    }
                }

                (rsize, 1, ipers)
            }
        };

        if flags.has_any(UmaFlags::Offpage) {
            // TODO: Set uk_slabzone.
        }

        // Get allocator. Single-page slabs use the cheaper arch-specific path.
        let alloc = if ppera == 1 {
            // TODO: Get uk_freef.
            small_alloc
        } else {
            Self::page_alloc
        };

        if flags.has_any(UmaFlags::MtxClass) {
            todo!()
        }

        // Get uk_pgoff: the in-page slab header is placed at the end of the slab, after
        // the item storage and the free-item array. Zero (unused) for off-page slabs.
        let mut pgoff = 0;

        if !flags.has_any(UmaFlags::Offpage) {
            let space = ppera * PAGE_SIZE.get();

            // TODO: This can cause a pointer to slab unaligned.
            pgoff = (space - hdr.size()) - ipers * free_item;

            // TODO: What is this? Sanity check that header + free items fit in the slab;
            // given how pgoff is derived above it looks like it can never fire — confirm.
            if space < pgoff + hdr.size() + ipers * free_item {
                panic!("UMA slab won't fit");
            }
        }

        if flags.has_any(UmaFlags::Hash) {
            todo!()
        }

        // TODO: Add uk_zones.
        // TODO: Add uma_kegs.
        Self {
            vm,
            size,
            rsize,
            pgoff,
            ppera,
            ipers,
            alloc,
            init,
            max_pages: 0,
            recurse: AtomicU32::new(0),
            flags,
            state: Mutex::new(KegState {
                pages: 0,
                free: 0,
                partial_slabs: VecDeque::new(),
            }),
        }
    }
}
194
195impl<T> UmaKeg<T> {
196    pub fn size(&self) -> NonZero<usize> {
197        self.size
198    }
199
200    pub fn allocated_size(&self) -> usize {
201        self.rsize
202    }
203
204    pub fn item_per_slab(&self) -> usize {
205        self.ipers
206    }
207
208    pub fn recurse(&self) -> u32 {
209        self.recurse.load(Ordering::Relaxed)
210    }
211
212    pub fn flags(&self) -> UmaFlags {
213        self.flags
214    }
215
216    /// See `page_alloc` on the Orbis for a reference.
217    ///
218    /// # Reference offsets
219    /// | Version | Offset |
220    /// |---------|--------|
221    /// |PS4 11.00|0x1402F0|
222    fn page_alloc(_: &Vm, _: Alloc) -> *mut u8 {
223        todo!()
224    }
225}
226
impl<T: FreeItem> UmaKeg<T> {
    /// Fetches a slab with at least one free item, allocating a new slab if the keg has none.
    ///
    /// Unlike Orbis, our slab contains a strong reference to its keg. That means all allocated
    /// slabs need to be freed manually, otherwise the keg will be leaked.
    ///
    /// See `keg_fetch_slab` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x141E20|
    pub fn fetch_slab(self: &Arc<Self>, mut flags: Alloc) -> Option<NonNull<Slab<T>>> {
        let mut state = self.state.lock();

        // No free items anywhere in the keg: try to grow it.
        while state.free == 0 {
            // A previous failed allocation in this loop already set NoVm; bail out
            // instead of retrying forever.
            if flags.has_any(Alloc::NoVm) {
                return None;
            }

            #[allow(clippy::while_immutable_condition)] // TODO: Remove this.
            while self.max_pages != 0 && self.max_pages <= state.pages {
                todo!()
            }

            // Track recursion depth around the allocation so re-entrant allocation can
            // be detected via recurse().
            self.recurse.fetch_add(1, Ordering::Relaxed);
            let slab = self.alloc_slab(&mut state, flags);
            self.recurse.fetch_sub(1, Ordering::Relaxed);

            if let Some(slab) = NonNull::new(slab) {
                // Fresh slab is entirely free, so it goes on the partial list.
                state.partial_slabs.push_front(slab);
                return Some(slab);
            }

            // Allocation failed; loop once more with NoVm so the check above returns None.
            flags |= Alloc::NoVm;
        }

        // Free items exist; presumably picks a slab from the partial list — confirm
        // against keg_fetch_slab once implemented.
        todo!()
    }

    /// See `keg_alloc_slab` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13FBA0|
    fn alloc_slab(self: &Arc<Self>, state: &mut KegState<T>, flags: Alloc) -> *mut Slab<T> {
        if self.flags.has_any(UmaFlags::Offpage) {
            todo!()
        } else {
            // Get allocation flags. malloc-backed zones skip zeroing; everything else is
            // handed out zeroed.
            let flags = if self.flags.has_any(UmaFlags::Malloc) {
                flags & !Alloc::Zero
            } else {
                flags | Alloc::Zero
            };

            // Allocate uk_ppera pages through the keg's allocator.
            let mem = (self.alloc)(&self.vm, flags);

            if !mem.is_null() {
                // The Orbis also check if uk_flags does not contains UMA_ZONE_OFFPAGE, which seems
                // to be useless since we only be here when it does not contains UMA_ZONE_OFFPAGE.
                // SAFETY: pgoff was computed in new() so that the header lies inside the
                // ppera-page allocation. NOTE(review): new() flags that the result may be
                // unaligned for SlabHdr<T> — confirm.
                let hdr = unsafe { mem.byte_add(self.pgoff).cast::<SlabHdr<T>>() };

                // NOTE(review): `self.ppera != 0` is always true (new() never produces
                // ppera == 0) — verify the intended condition against keg_alloc_slab.
                if self.flags.has_any(UmaFlags::VToSlab) && self.ppera != 0 {
                    todo!()
                }

                // TODO: I'm not confident about the memory layout here. The variables calculation
                // during keg construction is very complicated and I don't fully understand it. If
                // we encounter some memory corruptions then this is likely to be the root of
                // problem.
                let v = SlabHdr {
                    keg: self.clone(),
                    free_count: self.ipers,
                    first_free: 0,
                    items: mem,
                };

                // SAFETY: hdr points to writable memory inside the freshly allocated pages.
                unsafe { hdr.write(v) };

                // Initialize free items. The offset calculation here should be optimized away.
                let (_, off) = Layout::new::<SlabHdr<T>>()
                    .extend(Layout::new::<T>())
                    .unwrap();
                // SAFETY: new() reserved ipers free-item slots right after the header
                // (same Layout::extend computation as here).
                let free = unsafe { hdr.byte_add(off).cast::<T>() };

                for i in 0..self.ipers {
                    let item = T::new(i);

                    // SAFETY: i < ipers, so the write stays inside the reserved array.
                    unsafe { free.add(i).write(item) };
                }

                if self.init.is_some() {
                    todo!()
                }

                if self.flags.has_any(UmaFlags::Hash) {
                    todo!()
                }

                // Account the new pages and the items they contribute.
                state.pages += self.ppera;
                state.free += self.ipers;

                // Build a fat pointer whose slice metadata carries the item count.
                return core::ptr::slice_from_raw_parts_mut(hdr, self.ipers) as *mut Slab<T>;
            }

            // Page allocation failed; failure handling is not implemented yet.
            todo!()
        }
    }
}
337
/// Mutable state of [UmaKeg], guarded by the keg's mutex.
struct KegState<T> {
    pages: usize,                              // uk_pages: pages currently allocated
    free: usize,                               // uk_free: free items across all slabs
    partial_slabs: VecDeque<NonNull<Slab<T>>>, // uk_part_slab: slabs with free items
}

// SAFETY: KegState is only !Send because of the raw NonNull<Slab<T>> pointers.
// Those slabs are owned by the keg and only reached through the mutex that wraps
// this state — NOTE(review): confirm slabs are never tied to a specific thread.
unsafe impl<T: Send> Send for KegState<T> {}