// obkrnl/uma/zone.rs

1use super::bucket::{BucketItem, UmaBucket};
2use super::{Alloc, FreeItem, Slab, StdFree, Uma, UmaBox, UmaFlags, UmaKeg};
3use crate::context::{CpuLocal, current_thread};
4use crate::lock::{Mutex, MutexGuard};
5use crate::vm::Vm;
6use alloc::collections::VecDeque;
7use alloc::collections::linked_list::LinkedList;
8use alloc::string::String;
9use alloc::sync::Arc;
10use alloc::vec::Vec;
11use core::cell::RefCell;
12use core::cmp::min;
13use core::num::NonZero;
14use core::ops::DerefMut;
15use core::ptr::{NonNull, null_mut};
16use core::sync::atomic::{AtomicBool, Ordering};
17
/// Implementation of `uma_zone` structure.
pub struct UmaZone<T> {
    bucket_enable: Arc<AtomicBool>, // shared switch checked before allocating a new bucket
    bucket_keys: Arc<Vec<usize>>, // maps `(uz_count + 15) >> BUCKET_SHIFT` to an index into `bucket_zones`
    bucket_zones: Arc<Vec<UmaZone<StdFree>>>, // zones the buckets themselves are allocated from
    ty: ZoneType, // well-known mbuf zones get a non-`Other` type in `new`
    size: NonZero<usize>, // uz_size
    slab: fn(&Self, Option<&Arc<UmaKeg<T>>>, Alloc) -> Option<NonNull<Slab<T>>>, // uz_slab
    caches: CpuLocal<RefCell<UmaCache>>, // uz_cpu
    flags: UmaFlags,      // uz_flags
    state: Mutex<ZoneState<T>>, // mutable zone data shared across CPUs, see [`ZoneState`]
}
30
31impl<T: FreeItem> UmaZone<T> {
32    const ALIGN_CACHE: usize = 63; // uma_align_cache
33
34    /// See `zone_ctor` on Orbis for a reference.
35    ///
36    /// # Reference offsets
37    /// | Version | Offset |
38    /// |---------|--------|
39    /// |PS4 11.00|0x13D490|
40    #[allow(clippy::too_many_arguments)] // TODO: Find a better way.
41    pub(super) fn new(
42        vm: Arc<Vm>,
43        bucket_enable: Arc<AtomicBool>,
44        bucket_keys: Arc<Vec<usize>>,
45        bucket_zones: Arc<Vec<UmaZone<StdFree>>>,
46        name: impl Into<String>,
47        keg: Option<UmaKeg<T>>,
48        size: NonZero<usize>,
49        align: Option<usize>,
50        init: Option<fn()>,
51        flags: impl Into<UmaFlags>,
52    ) -> Self {
53        let name = name.into();
54        let flags = flags.into();
55        let (keg, mut flags) = if flags.has_any(UmaFlags::Secondary) {
56            todo!()
57        } else {
58            // We use a different approach here to make it idiomatic to Rust. On Orbis it will
59            // construct a keg here if it is passed from the caller. If not it will allocate a new
60            // keg from masterzone_k.
61            let keg = match keg {
62                Some(v) => v,
63                None => UmaKeg::new(vm, size, align.unwrap_or(Self::ALIGN_CACHE), init, flags),
64            };
65
66            (keg, UmaFlags::zeroed())
67        };
68
69        // Get type and uz_count.
70        let mut ty = ZoneType::Other;
71        let mut count = 0;
72
73        if !keg.flags().has_any(UmaFlags::Internal) {
74            count = if !keg.flags().has_any(UmaFlags::MaxBucket) {
75                min(keg.item_per_slab(), Uma::BUCKET_MAX)
76            } else {
77                Uma::BUCKET_MAX
78            };
79
80            match name.as_str() {
81                "mbuf_packet" => {
82                    ty = ZoneType::MbufPacket;
83                    count = 4;
84                }
85                "mbuf_cluster_pack" => {
86                    ty = ZoneType::MbufClusterPack;
87                    count = Uma::BUCKET_MAX;
88                }
89                "mbuf_jumbo_page" => {
90                    ty = ZoneType::MbufJumboPage;
91                    count = 1;
92                }
93                "mbuf" => {
94                    ty = ZoneType::Mbuf;
95                    count = 16;
96                }
97                "mbuf_cluster" => {
98                    ty = ZoneType::MbufCluster;
99                    count = 1;
100                }
101                _ => (),
102            }
103        }
104
105        // Construct uma_zone.
106        let inherit = UmaFlags::Offpage
107            | UmaFlags::Malloc
108            | UmaFlags::Hash
109            | UmaFlags::VToSlab
110            | UmaFlags::Bucket
111            | UmaFlags::Internal
112            | UmaFlags::CacheOnly;
113
114        flags |= keg.flags() & inherit;
115
116        Self {
117            bucket_enable,
118            bucket_keys,
119            bucket_zones,
120            ty,
121            size: keg.size(),
122            slab: Self::fetch_slab,
123            caches: CpuLocal::new(|_| RefCell::default()),
124            flags,
125            state: Mutex::new(ZoneState {
126                kegs: LinkedList::from([Arc::new(keg)]),
127                full_buckets: VecDeque::default(),
128                free_buckets: VecDeque::default(),
129                alloc_count: 0,
130                free_count: 0,
131                count,
132            }),
133        }
134    }
135}
136
impl<T> UmaZone<T> {
    /// Returns `uz_size` (the size of each item allocated from this zone).
    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    /// See `uma_zalloc_arg` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13E750|
    pub fn alloc(&self, flags: Alloc) -> *mut u8 {
        // A waitable allocation may sleep, so it is a bug to request one from a context that
        // cannot sleep.
        if flags.has_any(Alloc::Wait) {
            // TODO: The Orbis also modify td_pflags on a certain condition.
            let td = current_thread();

            if !td.can_sleep() {
                panic!("attempt to do waitable heap allocation in a non-sleeping context");
            }
        }

        loop {
            // Try allocate from per-CPU cache first so we don't need to acquire a mutex lock.
            let caches = self.caches.lock();
            let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());

            if !mem.is_null() {
                return mem;
            }

            drop(caches); // Exit from non-sleeping context before acquire the mutex.

            // Cache not found, allocate from the zone. We need to re-check the cache again because
            // we may on a different CPU since we drop the CPU pinning on the above.
            let mut state = self.state.lock();
            let caches = self.caches.lock();
            let mut cache = caches.borrow_mut();
            let mem = Self::alloc_from_cache(&mut cache);

            if !mem.is_null() {
                return mem;
            }

            // TODO: What actually we are doing here?
            // This drains the per-CPU statistic counters into the zone-wide counters.
            state.alloc_count += core::mem::take(&mut cache.allocs);
            state.free_count += core::mem::take(&mut cache.frees);

            // Hand this CPU's allocation bucket (which yielded nothing above) back to the
            // zone's free-bucket list.
            if let Some(b) = cache.alloc.take() {
                state.free_buckets.push_front(b);
            }

            // Install a full bucket from the zone into this CPU's cache and retry the cache
            // allocation.
            if let Some(b) = state.full_buckets.pop_front() {
                cache.alloc = Some(b);

                // Seems like this should never fail.
                let m = Self::alloc_from_cache(&mut cache);

                assert!(!m.is_null());

                return m;
            }

            // Release the cache before the slower paths below; `state` stays locked until
            // `alloc_bucket` consumes it.
            drop(cache);
            drop(caches);

            // TODO: What is this?
            if matches!(
                self.ty,
                ZoneType::MbufPacket
                    | ZoneType::MbufJumboPage
                    | ZoneType::Mbuf
                    | ZoneType::MbufCluster
            ) {
                if flags.has_any(Alloc::Wait) {
                    todo!()
                }

                todo!()
            }

            // TODO: What is this?
            // Non-mbuf zones grow uz_count toward BUCKET_MAX each time the fast paths miss.
            if !matches!(
                self.ty,
                ZoneType::MbufCluster
                    | ZoneType::Mbuf
                    | ZoneType::MbufJumboPage
                    | ZoneType::MbufPacket
                    | ZoneType::MbufClusterPack
            ) && state.count < Uma::BUCKET_MAX
            {
                state.count += 1;
            }

            if self.alloc_bucket(state, flags) {
                return self.alloc_item(flags);
            }
        }
    }

    /// Tries to pop a free item from the per-CPU cache buckets.
    ///
    /// NOTE(review): the non-empty-bucket path is still `todo!`, so at the moment this either
    /// hits that stub (bucket with items) or returns null (both buckets empty).
    fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
        while let Some(b) = &mut c.alloc {
            if b.len() != 0 {
                todo!()
            }

            // The allocation bucket is empty. If the free bucket has items, swap the two
            // buckets and try again.
            if c.free.as_ref().is_some_and(|b| b.len() != 0) {
                core::mem::swap(&mut c.alloc, &mut c.free);
                continue;
            }

            break;
        }

        null_mut()
    }

    /// See `zone_alloc_bucket` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13EBA0|
    fn alloc_bucket(&self, state: MutexGuard<ZoneState<T>>, flags: Alloc) -> bool {
        match state.free_buckets.front() {
            Some(_) => todo!(),
            None => {
                if self.bucket_enable.load(Ordering::Relaxed) {
                    // Get allocation flags.
                    let mut flags = flags & !Alloc::Zero;

                    if self.flags.has_any(UmaFlags::CacheOnly) {
                        flags |= Alloc::NoVm;
                    }

                    // Alloc a bucket.
                    // Pick the bucket zone whose bucket size fits uz_count.
                    let i = (state.count + 15) >> Uma::BUCKET_SHIFT;
                    let k = self.bucket_keys[i];

                    self.bucket_zones[k].alloc_item(flags);

                    todo!()
                }
            }
        }

        // Reached only when bucket allocation is disabled; report success so the caller falls
        // back to allocating a single item.
        true
    }

    /// See `zone_alloc_item` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13DD50|
    fn alloc_item(&self, flags: Alloc) -> *mut u8 {
        // Get a slab.
        let slab = (self.slab)(self, None, flags);

        if let Some(mut slab) = slab {
            // SAFETY: the pointer comes from the zone's slab fetcher — TODO(review): document
            // the invariant that makes this exclusive access sound.
            unsafe { slab.as_mut().alloc_item() };

            todo!()
        }

        todo!()
    }
}
304
305impl<T: FreeItem> UmaZone<T> {
306    /// See `zone_fetch_slab` on the Orbis for a reference.
307    ///
308    /// # Reference offsets
309    /// | Version | Offset |
310    /// |---------|--------|
311    /// |PS4 11.00|0x141DB0|
312    fn fetch_slab(&self, keg: Option<&Arc<UmaKeg<T>>>, flags: Alloc) -> Option<NonNull<Slab<T>>> {
313        let state = self.state.lock();
314        let keg = keg.unwrap_or(state.kegs.front().unwrap());
315
316        if !keg.flags().has_any(UmaFlags::Bucket) || keg.recurse() == 0 {
317            loop {
318                if let Some(v) = keg.fetch_slab(flags) {
319                    return Some(v);
320                }
321
322                if flags.has_any(Alloc::NoWait | Alloc::NoVm) {
323                    break;
324                }
325            }
326        }
327
328        None
329    }
330}
331
/// Contains mutable data for [UmaZone].
struct ZoneState<T> {
    kegs: LinkedList<Arc<UmaKeg<T>>>, // uz_kegs + uz_klink
    // Buckets with items ready to be installed into a per-CPU cache.
    full_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, // uz_full_bucket
    // Empty buckets retired from per-CPU caches, available for refill.
    free_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, // uz_free_bucket
    alloc_count: u64,                 // uz_allocs (aggregated from per-CPU caches)
    free_count: u64,                  // uz_frees (aggregated from per-CPU caches)
    count: usize,                     // uz_count (desired bucket item count)
}
341
/// Type of [`UmaZone`].
///
/// All variants except [`ZoneType::Other`] identify well-known mbuf zones by name; they get
/// special handling on the allocation slow path.
#[derive(Clone, Copy)]
enum ZoneType {
    /// Any zone that is not one of the named mbuf zones below.
    Other,
    /// `zone_pack`.
    MbufPacket,
    /// `zone_jumbop`.
    MbufJumboPage,
    /// `zone_mbuf`.
    Mbuf,
    /// `zone_clust`.
    MbufCluster,
    /// `zone_clust_pack`.
    MbufClusterPack,
}
357
/// Implementation of `uma_cache` structure (per-CPU cache of a zone).
#[derive(Default)]
struct UmaCache {
    // Bucket items are allocated from; swapped with `free` when it runs out.
    alloc: Option<UmaBox<UmaBucket<[BucketItem]>>>, // uc_allocbucket
    free: Option<UmaBox<UmaBucket<[BucketItem]>>>,  // uc_freebucket
    // Counters drained into ZoneState::{alloc_count, free_count} on the slow path.
    allocs: u64,                                    // uc_allocs
    frees: u64,                                     // uc_frees
}