obkrnl/uma/zone.rs

use super::bucket::{BucketItem, UmaBucket};
use super::keg::UmaKeg;
use super::{Alloc, Uma, UmaBox, UmaFlags};
use crate::context::{CpuLocal, current_thread};
use crate::lock::{Gutex, GutexGroup, GutexWrite};
use crate::vm::Vm;
use alloc::collections::VecDeque;
use alloc::collections::linked_list::LinkedList;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::cell::RefCell;
use core::cmp::min;
use core::num::NonZero;
use core::ops::DerefMut;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, Ordering};

/// Implementation of `uma_zone` structure.
pub struct UmaZone {
    bucket_enable: Arc<AtomicBool>,
    bucket_keys: Arc<Vec<usize>>,
    bucket_zones: Arc<Vec<UmaZone>>,
    ty: ZoneType,
    size: NonZero<usize>,                                           // uz_size
    kegs: Gutex<LinkedList<UmaKeg>>,                                // uz_kegs + uz_klink
    slab: fn(&Self, Option<&mut UmaKeg>, Alloc) -> Option<()>,      // uz_slab
    caches: CpuLocal<RefCell<UmaCache>>,                            // uz_cpu
    full_buckets: Gutex<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>, // uz_full_bucket
    free_buckets: Gutex<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>, // uz_free_bucket
    alloc_count: Gutex<u64>,                                        // uz_allocs
    free_count: Gutex<u64>,                                         // uz_frees
    count: Gutex<usize>,                                            // uz_count
    flags: UmaFlags,                                                // uz_flags
}

impl UmaZone {
    const ALIGN_CACHE: usize = 63; // uma_align_cache

    /// See `zone_ctor` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13D490|
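    ///
    /// # Usage sketch
    ///
    /// A hypothetical call for illustration only; the zone name, item size and flags below are
    /// assumptions, not taken from an actual call site:
    ///
    /// ```ignore
    /// let zone = UmaZone::new(
    ///     vm,
    ///     bucket_enable,
    ///     bucket_keys,
    ///     bucket_zones,
    ///     "example_zone",
    ///     None,                       // Let the zone construct its own keg.
    ///     NonZero::new(256).unwrap(), // Item size in bytes.
    ///     None,                       // Use the default cache alignment.
    ///     UmaFlags::zeroed(),
    /// );
    /// ```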
    #[allow(clippy::too_many_arguments)] // TODO: Find a better way.
    pub(super) fn new(
        vm: Arc<Vm>,
        bucket_enable: Arc<AtomicBool>,
        bucket_keys: Arc<Vec<usize>>,
        bucket_zones: Arc<Vec<UmaZone>>,
        name: impl Into<String>,
        keg: Option<UmaKeg>,
        size: NonZero<usize>,
        align: Option<usize>,
        flags: impl Into<UmaFlags>,
    ) -> Self {
        let name = name.into();
        let flags = flags.into();
        let (keg, mut flags) = if flags.has_any(UmaFlags::Secondary) {
            todo!()
        } else {
            // We use a different approach here to make it idiomatic Rust. On the Orbis it
            // constructs a keg here if one is passed from the caller; if not, it allocates a new
            // keg from masterzone_k.
            let keg = match keg {
                Some(v) => v,
                None => UmaKeg::new(vm, size, align.unwrap_or(Self::ALIGN_CACHE), flags),
            };

            (keg, UmaFlags::zeroed())
        };

        // Get type and uz_count.
        let mut ty = ZoneType::Other;
        let mut count = 0;

        if !keg.flags().has_any(UmaFlags::Internal) {
            count = if !keg.flags().has_any(UmaFlags::MaxBucket) {
                min(keg.item_per_slab(), Uma::BUCKET_MAX)
            } else {
                Uma::BUCKET_MAX
            };

            match name.as_str() {
                "mbuf_packet" => {
                    ty = ZoneType::MbufPacket;
                    count = 4;
                }
                "mbuf_cluster_pack" => {
                    ty = ZoneType::MbufClusterPack;
                    count = Uma::BUCKET_MAX;
                }
                "mbuf_jumbo_page" => {
                    ty = ZoneType::MbufJumboPage;
                    count = 1;
                }
                "mbuf" => {
                    ty = ZoneType::Mbuf;
                    count = 16;
                }
                "mbuf_cluster" => {
                    ty = ZoneType::MbufCluster;
                    count = 1;
                }
                _ => (),
            }
        }

        // Construct uma_zone.
        let gg = GutexGroup::new();
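        // Inherit the keg flags that are also meaningful at the zone level.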
        let inherit = UmaFlags::Offpage
            | UmaFlags::Malloc
            | UmaFlags::Hash
            | UmaFlags::RefCnt
            | UmaFlags::VToSlab
            | UmaFlags::Bucket
            | UmaFlags::Internal
            | UmaFlags::CacheOnly;

        flags |= keg.flags() & inherit;

        Self {
            bucket_enable,
            bucket_keys,
            bucket_zones,
            ty,
            size: keg.size(),
            kegs: gg.clone().spawn(LinkedList::from([keg])),
            slab: Self::fetch_slab,
            caches: CpuLocal::new(|_| RefCell::default()),
            full_buckets: gg.clone().spawn_default(),
            free_buckets: gg.clone().spawn_default(),
            alloc_count: gg.clone().spawn_default(),
            free_count: gg.clone().spawn_default(),
            count: gg.spawn(count),
            flags,
        }
    }

    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    /// See `uma_zalloc_arg` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13E750|
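    ///
    /// # Usage sketch
    ///
    /// A hypothetical call site for illustration only (assumes an already constructed
    /// `zone: UmaZone`):
    ///
    /// ```ignore
    /// let mem = zone.alloc(Alloc::NoWait);
    ///
    /// if mem.is_null() {
    ///     // The allocation failed without sleeping.
    /// }
    /// ```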
    pub fn alloc(&self, flags: Alloc) -> *mut u8 {
        if flags.has_any(Alloc::Wait) {
            // TODO: The Orbis also modifies td_pflags under a certain condition.
            let td = current_thread();

            if !td.can_sleep() {
                panic!("attempt to do waitable heap allocation in a non-sleeping context");
            }
        }

        loop {
            // Try to allocate from the per-CPU cache first so we don't need to acquire a mutex.
            let caches = self.caches.lock();
            let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());

            if !mem.is_null() {
                return mem;
            }

            drop(caches); // Exit the non-sleeping context before acquiring the mutex.

            // Cache miss, allocate from the zone. We need to re-check the cache because we may
            // be on a different CPU since we dropped the CPU pinning above.
            let mut frees = self.free_buckets.write();
            let mut count = self.count.write();
            let caches = self.caches.lock();
            let mut cache = caches.borrow_mut();
            let mem = Self::alloc_from_cache(&mut cache);

            if !mem.is_null() {
                return mem;
            }

            // TODO: What are we actually doing here?
            *self.alloc_count.write() += core::mem::take(&mut cache.allocs);
            *self.free_count.write() += core::mem::take(&mut cache.frees);

            if let Some(b) = cache.alloc.take() {
                frees.push_front(b);
            }

            if let Some(b) = self.full_buckets.write().pop_front() {
                cache.alloc = Some(b);

                // Seems like this should never fail.
                let m = Self::alloc_from_cache(&mut cache);

                assert!(!m.is_null());

                return m;
            }

            drop(cache);
            drop(caches);

            // TODO: What is this?
            if matches!(
                self.ty,
                ZoneType::MbufPacket
                    | ZoneType::MbufJumboPage
                    | ZoneType::Mbuf
                    | ZoneType::MbufCluster
            ) {
                if flags.has_any(Alloc::Wait) {
                    todo!()
                }

                todo!()
            }

            // TODO: What is this?
            if !matches!(
                self.ty,
                ZoneType::MbufCluster
                    | ZoneType::Mbuf
                    | ZoneType::MbufJumboPage
                    | ZoneType::MbufPacket
                    | ZoneType::MbufClusterPack
            ) && *count < Uma::BUCKET_MAX
            {
                *count += 1;
            }

            if self.alloc_bucket(frees, count, flags) {
                return self.alloc_item(flags);
            }
        }
    }

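    /// Tries to get an item from the per-CPU cache, swapping in the free bucket when the
    /// allocation bucket is empty. Returns null if neither bucket has an item.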
    fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
        while let Some(b) = &mut c.alloc {
            if b.len() != 0 {
                todo!()
            }

            if c.free.as_ref().is_some_and(|b| b.len() != 0) {
                core::mem::swap(&mut c.alloc, &mut c.free);
                continue;
            }

            break;
        }

        null_mut()
    }

    /// See `zone_alloc_bucket` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13EBA0|
    fn alloc_bucket(
        &self,
        frees: GutexWrite<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>,
        count: GutexWrite<usize>,
        flags: Alloc,
    ) -> bool {
        match frees.front() {
            Some(_) => todo!(),
            None => {
                if self.bucket_enable.load(Ordering::Relaxed) {
                    // Get allocation flags.
                    let mut flags = flags & !Alloc::Zero;

                    if self.flags.has_any(UmaFlags::CacheOnly) {
                        flags |= Alloc::NoVm;
                    }

                    // Alloc a bucket.
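                    // The index below selects which bucket zone backs this zone's buckets by
                    // rounding the current uz_count up to a bucket size class (e.g. assuming
                    // Uma::BUCKET_SHIFT is 4, a count of 16 maps to index (16 + 15) >> 4 = 1).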
                    let i = (*count + 15) >> Uma::BUCKET_SHIFT;
                    let k = self.bucket_keys[i];

                    self.bucket_zones[k].alloc_item(flags);

                    todo!()
                }
            }
        }

        true
    }

    /// See `zone_alloc_item` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13DD50|
    fn alloc_item(&self, flags: Alloc) -> *mut u8 {
        // Get a slab.
        let slab = (self.slab)(self, None, flags);

        if slab.is_some() {
            todo!()
        }

        todo!()
    }

    /// See `zone_fetch_slab` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x141DB0|
    fn fetch_slab(&self, keg: Option<&mut UmaKeg>, flags: Alloc) -> Option<()> {
        let mut kegs = self.kegs.write();
        let keg = keg.unwrap_or(kegs.front_mut().unwrap());

        if !keg.flags().has_any(UmaFlags::Bucket) || keg.recurse() == 0 {
            loop {
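                // Keep retrying until the keg can provide a slab, unless the caller asked not to
                // wait or not to touch the VM.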
                if let Some(v) = keg.fetch_slab(self, flags) {
                    return Some(v);
                }

                if flags.has_any(Alloc::NoWait | Alloc::NoVm) {
                    break;
                }
            }
        }

        None
    }
}

/// Type of [`UmaZone`].
#[derive(Clone, Copy)]
enum ZoneType {
    Other,
    /// `zone_pack`.
    MbufPacket,
    /// `zone_jumbop`.
    MbufJumboPage,
    /// `zone_mbuf`.
    Mbuf,
    /// `zone_clust`.
    MbufCluster,
    /// `zone_clust_pack`.
    MbufClusterPack,
}

/// Implementation of `uma_cache` structure.
#[derive(Default)]
struct UmaCache {
    alloc: Option<UmaBox<UmaBucket<[BucketItem]>>>, // uc_allocbucket
    free: Option<UmaBox<UmaBucket<[BucketItem]>>>,  // uc_freebucket
    allocs: u64,                                    // uc_allocs
    frees: u64,                                     // uc_frees
}