// obkrnl/uma/zone.rs

1use super::bucket::{BucketItem, UmaBucket};
2use super::keg::UmaKeg;
3use super::slab::Slab;
4use super::{Alloc, Uma, UmaBox, UmaFlags};
5use crate::context::{CpuLocal, current_thread};
6use crate::lock::{Mutex, MutexGuard};
7use crate::vm::Vm;
8use alloc::collections::VecDeque;
9use alloc::collections::linked_list::LinkedList;
10use alloc::string::String;
11use alloc::sync::Arc;
12use alloc::vec::Vec;
13use core::cell::RefCell;
14use core::cmp::min;
15use core::num::NonZero;
16use core::ops::DerefMut;
17use core::ptr::{NonNull, null_mut};
18use core::sync::atomic::{AtomicBool, Ordering};
19
/// Implementation of `uma_zone` structure.
pub struct UmaZone {
    bucket_enable: Arc<AtomicBool>, // global switch that allows/forbids allocating new buckets
    bucket_keys: Arc<Vec<usize>>, // maps a size-derived index to an index into `bucket_zones`
    bucket_zones: Arc<Vec<UmaZone>>, // zones the buckets themselves are allocated from
    ty: ZoneType, // derived from the zone name in `new`
    size: NonZero<usize>, // uz_size
    slab: fn(&Self, Option<&mut UmaKeg>, Alloc) -> Option<NonNull<Slab<()>>>, // uz_slab
    caches: CpuLocal<RefCell<UmaCache>>, // uz_cpu
    flags: UmaFlags,      // uz_flags
    state: Mutex<ZoneState>, // mutable zone data, shared across CPUs
}
32
33impl UmaZone {
34    const ALIGN_CACHE: usize = 63; // uma_align_cache
35
36    /// See `zone_ctor` on Orbis for a reference.
37    ///
38    /// # Reference offsets
39    /// | Version | Offset |
40    /// |---------|--------|
41    /// |PS4 11.00|0x13D490|
42    #[allow(clippy::too_many_arguments)] // TODO: Find a better way.
43    pub(super) fn new(
44        vm: Arc<Vm>,
45        bucket_enable: Arc<AtomicBool>,
46        bucket_keys: Arc<Vec<usize>>,
47        bucket_zones: Arc<Vec<UmaZone>>,
48        name: impl Into<String>,
49        keg: Option<UmaKeg>,
50        size: NonZero<usize>,
51        align: Option<usize>,
52        init: Option<fn()>,
53        flags: impl Into<UmaFlags>,
54    ) -> Self {
55        let name = name.into();
56        let flags = flags.into();
57        let (keg, mut flags) = if flags.has_any(UmaFlags::Secondary) {
58            todo!()
59        } else {
60            // We use a different approach here to make it idiomatic to Rust. On Orbis it will
61            // construct a keg here if it is passed from the caller. If not it will allocate a new
62            // keg from masterzone_k.
63            let keg = match keg {
64                Some(v) => v,
65                None => UmaKeg::new(vm, size, align.unwrap_or(Self::ALIGN_CACHE), init, flags),
66            };
67
68            (keg, UmaFlags::zeroed())
69        };
70
71        // Get type and uz_count.
72        let mut ty = ZoneType::Other;
73        let mut count = 0;
74
75        if !keg.flags().has_any(UmaFlags::Internal) {
76            count = if !keg.flags().has_any(UmaFlags::MaxBucket) {
77                min(keg.item_per_slab(), Uma::BUCKET_MAX)
78            } else {
79                Uma::BUCKET_MAX
80            };
81
82            match name.as_str() {
83                "mbuf_packet" => {
84                    ty = ZoneType::MbufPacket;
85                    count = 4;
86                }
87                "mbuf_cluster_pack" => {
88                    ty = ZoneType::MbufClusterPack;
89                    count = Uma::BUCKET_MAX;
90                }
91                "mbuf_jumbo_page" => {
92                    ty = ZoneType::MbufJumboPage;
93                    count = 1;
94                }
95                "mbuf" => {
96                    ty = ZoneType::Mbuf;
97                    count = 16;
98                }
99                "mbuf_cluster" => {
100                    ty = ZoneType::MbufCluster;
101                    count = 1;
102                }
103                _ => (),
104            }
105        }
106
107        // Construct uma_zone.
108        let inherit = UmaFlags::Offpage
109            | UmaFlags::Malloc
110            | UmaFlags::Hash
111            | UmaFlags::RefCnt
112            | UmaFlags::VToSlab
113            | UmaFlags::Bucket
114            | UmaFlags::Internal
115            | UmaFlags::CacheOnly;
116
117        flags |= keg.flags() & inherit;
118
119        Self {
120            bucket_enable,
121            bucket_keys,
122            bucket_zones,
123            ty,
124            size: keg.size(),
125            slab: Self::fetch_slab,
126            caches: CpuLocal::new(|_| RefCell::default()),
127            flags,
128            state: Mutex::new(ZoneState {
129                kegs: LinkedList::from([keg]),
130                full_buckets: VecDeque::default(),
131                free_buckets: VecDeque::default(),
132                alloc_count: 0,
133                free_count: 0,
134                count,
135            }),
136        }
137    }
138
    /// Returns the item size of this zone (`uz_size`), taken from the backing keg on construction.
    pub fn size(&self) -> NonZero<usize> {
        self.size
    }
142
    /// See `uma_zalloc_arg` on the Orbis for a reference.
    ///
    /// Allocates one item from this zone and returns a pointer to it. `Alloc::Wait` allows the
    /// allocation to sleep, which is only legal from a sleepable context.
    ///
    /// # Panics
    /// Panics when `Alloc::Wait` is requested from a thread that cannot sleep.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13E750|
    pub fn alloc(&self, flags: Alloc) -> *mut u8 {
        if flags.has_any(Alloc::Wait) {
            // TODO: The Orbis also modify td_pflags on a certain condition.
            let td = current_thread();

            if !td.can_sleep() {
                panic!("attempt to do waitable heap allocation in a non-sleeping context");
            }
        }

        loop {
            // Try allocate from per-CPU cache first so we don't need to acquire a mutex lock.
            let caches = self.caches.lock();
            let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());

            if !mem.is_null() {
                return mem;
            }

            drop(caches); // Exit from non-sleeping context before acquire the mutex.

            // Cache not found, allocate from the zone. We need to re-check the cache again because
            // we may on a different CPU since we drop the CPU pinning on the above.
            let mut state = self.state.lock();
            let caches = self.caches.lock();
            let mut cache = caches.borrow_mut();
            let mem = Self::alloc_from_cache(&mut cache);

            if !mem.is_null() {
                return mem;
            }

            // TODO: What actually we are doing here?
            // Fold the per-CPU statistics into the zone-wide counters, zeroing the per-CPU ones.
            state.alloc_count += core::mem::take(&mut cache.allocs);
            state.free_count += core::mem::take(&mut cache.frees);

            // Retire this CPU's exhausted allocation bucket to the zone's free-bucket list.
            if let Some(b) = cache.alloc.take() {
                state.free_buckets.push_front(b);
            }

            // Refill the CPU cache with a full bucket from the zone, if one is available.
            if let Some(b) = state.full_buckets.pop_front() {
                cache.alloc = Some(b);

                // Seems like this should never fail.
                let m = Self::alloc_from_cache(&mut cache);

                assert!(!m.is_null());

                return m;
            }

            // Release the per-CPU cache (and with it the CPU pinning) before the slower paths.
            drop(cache);
            drop(caches);

            // TODO: What is this?
            // NOTE(review): looks like a dedicated refill path for the mbuf zones — confirm
            // against uma_zalloc_arg.
            if matches!(
                self.ty,
                ZoneType::MbufPacket
                    | ZoneType::MbufJumboPage
                    | ZoneType::Mbuf
                    | ZoneType::MbufCluster
            ) {
                if flags.has_any(Alloc::Wait) {
                    todo!()
                }

                todo!()
            }

            // TODO: What is this?
            // Grow uz_count for non-mbuf zones, capped at BUCKET_MAX, so the next bucket
            // allocation gets a larger bucket.
            if !matches!(
                self.ty,
                ZoneType::MbufCluster
                    | ZoneType::Mbuf
                    | ZoneType::MbufJumboPage
                    | ZoneType::MbufPacket
                    | ZoneType::MbufClusterPack
            ) && state.count < Uma::BUCKET_MAX
            {
                state.count += 1;
            }

            // Try to prepare a bucket for the zone; on success allocate a single item instead.
            if self.alloc_bucket(state, flags) {
                return self.alloc_item(flags);
            }
        }
    }
236
237    fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
238        while let Some(b) = &mut c.alloc {
239            if b.len() != 0 {
240                todo!()
241            }
242
243            if c.free.as_ref().is_some_and(|b| b.len() != 0) {
244                core::mem::swap(&mut c.alloc, &mut c.free);
245                continue;
246            }
247
248            break;
249        }
250
251        null_mut()
252    }
253
    /// See `zone_alloc_bucket` on the Orbis for a reference.
    ///
    /// Tries to make a bucket available to the zone. Currently only the path where bucketing is
    /// disabled completes (returning `true`); the paths that actually produce a bucket are still
    /// `todo!()`.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x13EBA0|
    fn alloc_bucket(&self, state: MutexGuard<ZoneState>, flags: Alloc) -> bool {
        match state.free_buckets.front() {
            Some(_) => todo!(),
            None => {
                if self.bucket_enable.load(Ordering::Relaxed) {
                    // Get allocation flags.
                    let mut flags = flags & !Alloc::Zero;

                    if self.flags.has_any(UmaFlags::CacheOnly) {
                        flags |= Alloc::NoVm;
                    }

                    // Alloc a bucket.
                    // NOTE(review): the `+ 15` looks like rounding uz_count up to the bucket-size
                    // granularity before indexing `bucket_keys` — confirm against
                    // zone_alloc_bucket.
                    let i = (state.count + 15) >> Uma::BUCKET_SHIFT;
                    let k = self.bucket_keys[i];

                    self.bucket_zones[k].alloc_item(flags);

                    todo!()
                }
            }
        }

        true
    }
285
286    /// See `zone_alloc_item` on the Orbis for a reference.
287    ///
288    /// # Reference offsets
289    /// | Version | Offset |
290    /// |---------|--------|
291    /// |PS4 11.00|0x13DD50|
292    fn alloc_item(&self, flags: Alloc) -> *mut u8 {
293        // Get a slab.
294        let slab = (self.slab)(self, None, flags);
295
296        if slab.is_some() {
297            todo!()
298        }
299
300        todo!()
301    }
302
303    /// See `zone_fetch_slab` on the Orbis for a reference.
304    ///
305    /// # Reference offsets
306    /// | Version | Offset |
307    /// |---------|--------|
308    /// |PS4 11.00|0x141DB0|
309    fn fetch_slab(&self, keg: Option<&mut UmaKeg>, flags: Alloc) -> Option<NonNull<Slab<()>>> {
310        let mut state = self.state.lock();
311        let keg = keg.unwrap_or(state.kegs.front_mut().unwrap());
312
313        if !keg.flags().has_any(UmaFlags::Bucket) || keg.recurse() == 0 {
314            loop {
315                if let Some(v) = keg.fetch_slab(self, flags) {
316                    return Some(v);
317                }
318
319                if flags.has_any(Alloc::NoWait | Alloc::NoVm) {
320                    break;
321                }
322            }
323        }
324
325        None
326    }
327}
328
/// Contains mutable data for [UmaZone], protected by the zone's mutex (`UmaZone::state`).
struct ZoneState {
    kegs: LinkedList<UmaKeg>, // uz_kegs + uz_klink; always constructed with at least one keg
    full_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, // uz_full_bucket
    free_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, // uz_free_bucket
    alloc_count: u64,         // uz_allocs
    free_count: u64,          // uz_frees
    count: usize,             // uz_count
}
338
/// Type of [`UmaZone`], derived from the zone name in [`UmaZone::new`].
///
/// The mbuf variants correspond to the dedicated mbuf zones on Orbis; every other zone is
/// [`ZoneType::Other`].
#[derive(Clone, Copy)]
enum ZoneType {
    /// Any zone other than the named mbuf zones below.
    Other,
    /// `zone_pack`.
    MbufPacket,
    /// `zone_jumbop`.
    MbufJumboPage,
    /// `zone_mbuf`.
    Mbuf,
    /// `zone_clust`.
    MbufCluster,
    /// `zone_clust_pack`.
    MbufClusterPack,
}
354
/// Implementation of `uma_cache` structure.
///
/// One instance exists per CPU (see `UmaZone::caches`) so the allocation fast path can run
/// without taking the zone mutex.
#[derive(Default)]
struct UmaCache {
    alloc: Option<UmaBox<UmaBucket<[BucketItem]>>>, // uc_allocbucket
    free: Option<UmaBox<UmaBucket<[BucketItem]>>>,  // uc_freebucket
    allocs: u64,                                    // uc_allocs
    frees: u64,                                     // uc_frees
}