1use super::bucket::{BucketItem, UmaBucket};
2use super::{Alloc, FreeItem, Slab, StdFree, Uma, UmaBox, UmaFlags, UmaKeg};
3use crate::context::{CpuLocal, current_thread};
4use crate::lock::{Mutex, MutexGuard};
5use crate::vm::Vm;
6use alloc::collections::VecDeque;
7use alloc::collections::linked_list::LinkedList;
8use alloc::string::String;
9use alloc::sync::Arc;
10use alloc::vec::Vec;
11use core::cell::RefCell;
12use core::cmp::min;
13use core::num::NonZero;
14use core::ops::DerefMut;
15use core::ptr::{NonNull, null_mut};
16use core::sync::atomic::{AtomicBool, Ordering};
17
18pub struct UmaZone<T> {
20 bucket_enable: Arc<AtomicBool>,
21 bucket_keys: Arc<Vec<usize>>,
22 bucket_zones: Arc<Vec<UmaZone<StdFree>>>,
23 ty: ZoneType,
24 size: NonZero<usize>, slab: fn(&Self, Option<&Arc<UmaKeg<T>>>, Alloc) -> Option<NonNull<Slab<T>>>, caches: CpuLocal<RefCell<UmaCache>>, flags: UmaFlags, state: Mutex<ZoneState<T>>,
29}
30
31impl<T: FreeItem> UmaZone<T> {
32 const ALIGN_CACHE: usize = 63; #[allow(clippy::too_many_arguments)] pub(super) fn new(
42 vm: Arc<Vm>,
43 bucket_enable: Arc<AtomicBool>,
44 bucket_keys: Arc<Vec<usize>>,
45 bucket_zones: Arc<Vec<UmaZone<StdFree>>>,
46 name: impl Into<String>,
47 keg: Option<UmaKeg<T>>,
48 size: NonZero<usize>,
49 align: Option<usize>,
50 init: Option<fn()>,
51 flags: impl Into<UmaFlags>,
52 ) -> Self {
53 let name = name.into();
54 let flags = flags.into();
55 let (keg, mut flags) = if flags.has_any(UmaFlags::Secondary) {
56 todo!()
57 } else {
58 let keg = match keg {
62 Some(v) => v,
63 None => UmaKeg::new(vm, size, align.unwrap_or(Self::ALIGN_CACHE), init, flags),
64 };
65
66 (keg, UmaFlags::zeroed())
67 };
68
69 let mut ty = ZoneType::Other;
71 let mut count = 0;
72
73 if !keg.flags().has_any(UmaFlags::Internal) {
74 count = if !keg.flags().has_any(UmaFlags::MaxBucket) {
75 min(keg.item_per_slab(), Uma::BUCKET_MAX)
76 } else {
77 Uma::BUCKET_MAX
78 };
79
80 match name.as_str() {
81 "mbuf_packet" => {
82 ty = ZoneType::MbufPacket;
83 count = 4;
84 }
85 "mbuf_cluster_pack" => {
86 ty = ZoneType::MbufClusterPack;
87 count = Uma::BUCKET_MAX;
88 }
89 "mbuf_jumbo_page" => {
90 ty = ZoneType::MbufJumboPage;
91 count = 1;
92 }
93 "mbuf" => {
94 ty = ZoneType::Mbuf;
95 count = 16;
96 }
97 "mbuf_cluster" => {
98 ty = ZoneType::MbufCluster;
99 count = 1;
100 }
101 _ => (),
102 }
103 }
104
105 let inherit = UmaFlags::Offpage
107 | UmaFlags::Malloc
108 | UmaFlags::Hash
109 | UmaFlags::VToSlab
110 | UmaFlags::Bucket
111 | UmaFlags::Internal
112 | UmaFlags::CacheOnly;
113
114 flags |= keg.flags() & inherit;
115
116 Self {
117 bucket_enable,
118 bucket_keys,
119 bucket_zones,
120 ty,
121 size: keg.size(),
122 slab: Self::fetch_slab,
123 caches: CpuLocal::new(|_| RefCell::default()),
124 flags,
125 state: Mutex::new(ZoneState {
126 kegs: LinkedList::from([Arc::new(keg)]),
127 full_buckets: VecDeque::default(),
128 free_buckets: VecDeque::default(),
129 alloc_count: 0,
130 free_count: 0,
131 count,
132 }),
133 }
134 }
135}
136
impl<T> UmaZone<T> {
    /// Returns the size of each item allocated from this zone.
    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    /// Allocates one item from this zone.
    ///
    /// `Alloc::Wait` is only legal from a context that is allowed to sleep;
    /// otherwise this panics. Several slow paths are still `todo!()`.
    pub fn alloc(&self, flags: Alloc) -> *mut u8 {
        if flags.has_any(Alloc::Wait) {
            let td = current_thread();

            if !td.can_sleep() {
                panic!("attempt to do waitable heap allocation in a non-sleeping context");
            }
        }

        loop {
            // Fast path: serve from the per-CPU cache without the zone lock.
            let caches = self.caches.lock();
            let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());

            if !mem.is_null() {
                return mem;
            }

            // Slow path. The CPU-local guard is released BEFORE taking the
            // zone lock, then re-acquired, so the lock order is always
            // zone-lock-after-cache-release. NOTE(review): assumes
            // `CpuLocal::lock` keeps us on the same CPU across the relock —
            // confirm.
            drop(caches); let mut state = self.state.lock();
            let caches = self.caches.lock();
            let mut cache = caches.borrow_mut();
            let mem = Self::alloc_from_cache(&mut cache);

            if !mem.is_null() {
                return mem;
            }

            // Fold the per-CPU statistics into the zone-wide counters.
            state.alloc_count += core::mem::take(&mut cache.allocs);
            state.free_count += core::mem::take(&mut cache.frees);

            // The cache's allocation bucket is empty at this point; return it
            // to the zone's free list.
            if let Some(b) = cache.alloc.take() {
                state.free_buckets.push_front(b);
            }

            // Refill the cache from a full bucket if the zone has one.
            if let Some(b) = state.full_buckets.pop_front() {
                cache.alloc = Some(b);

                // A freshly installed full bucket must yield an item.
                let m = Self::alloc_from_cache(&mut cache);

                assert!(!m.is_null());

                return m;
            }

            drop(cache);
            drop(caches);

            // Mbuf-related zones take a different refill path (unimplemented).
            if matches!(
                self.ty,
                ZoneType::MbufPacket
                    | ZoneType::MbufJumboPage
                    | ZoneType::Mbuf
                    | ZoneType::MbufCluster
            ) {
                if flags.has_any(Alloc::Wait) {
                    todo!()
                }

                todo!()
            }

            // For ordinary zones, grow the target bucket item count toward
            // the maximum each time we miss.
            if !matches!(
                self.ty,
                ZoneType::MbufCluster
                    | ZoneType::Mbuf
                    | ZoneType::MbufJumboPage
                    | ZoneType::MbufPacket
                    | ZoneType::MbufClusterPack
            ) && state.count < Uma::BUCKET_MAX
            {
                state.count += 1;
            }

            // Try to allocate a new bucket (this consumes the state guard);
            // on success allocate an item directly, otherwise loop and retry.
            if self.alloc_bucket(state, flags) {
                return self.alloc_item(flags);
            }
        }
    }

    /// Tries to take an item from the per-CPU cache.
    ///
    /// Currently always returns null: the branch that actually pops an item
    /// from a non-empty bucket is still `todo!()`.
    fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
        while let Some(b) = &mut c.alloc {
            // Allocation bucket has items (pop path not implemented yet).
            if b.len() != 0 {
                todo!()
            }

            // Allocation bucket is empty; if the free bucket has items, swap
            // the two and retry.
            if c.free.as_ref().is_some_and(|b| b.len() != 0) {
                core::mem::swap(&mut c.alloc, &mut c.free);
                continue;
            }

            break;
        }

        null_mut()
    }

    /// Tries to make a bucket available to the zone, allocating a fresh one
    /// from the matching bucket zone when permitted.
    ///
    /// Consumes the zone state guard. The implemented paths always report
    /// `true`; reuse of an existing free bucket is still `todo!()`.
    fn alloc_bucket(&self, state: MutexGuard<ZoneState<T>>, flags: Alloc) -> bool {
        match state.free_buckets.front() {
            Some(_) => todo!(),
            None => {
                // Bucket allocation may be globally disabled (e.g. during
                // early boot).
                if self.bucket_enable.load(Ordering::Relaxed) {
                    // Do not pass Alloc::Zero through to the bucket
                    // allocation itself.
                    let mut flags = flags & !Alloc::Zero;

                    if self.flags.has_any(UmaFlags::CacheOnly) {
                        flags |= Alloc::NoVm;
                    }

                    // Map the desired item count to a bucket-zone key.
                    // NOTE(review): assumes `bucket_keys` is sized to cover
                    // (count + 15) >> BUCKET_SHIFT for every reachable count
                    // — confirm against Uma's construction of the table.
                    let i = (state.count + 15) >> Uma::BUCKET_SHIFT;
                    let k = self.bucket_keys[i];

                    self.bucket_zones[k].alloc_item(flags);

                    todo!()
                }
            }
        }

        true
    }

    /// Allocates an item directly from a slab, bypassing the bucket caches.
    ///
    /// Not fully implemented: both the success and the failure continuations
    /// are still `todo!()`.
    fn alloc_item(&self, flags: Alloc) -> *mut u8 {
        // Fetch a slab via the zone's slab routine (`fetch_slab` by default).
        let slab = (self.slab)(self, None, flags);

        if let Some(mut slab) = slab {
            // SAFETY: the pointer was just produced by the slab routine.
            // NOTE(review): exclusivity of this `&mut` access is not proven
            // here — confirm the slab is not aliased while the item is taken.
            unsafe { slab.as_mut().alloc_item() };

            todo!()
        }

        todo!()
    }
}
304
305impl<T: FreeItem> UmaZone<T> {
306 fn fetch_slab(&self, keg: Option<&Arc<UmaKeg<T>>>, flags: Alloc) -> Option<NonNull<Slab<T>>> {
313 let state = self.state.lock();
314 let keg = keg.unwrap_or(state.kegs.front().unwrap());
315
316 if !keg.flags().has_any(UmaFlags::Bucket) || keg.recurse() == 0 {
317 loop {
318 if let Some(v) = keg.fetch_slab(flags) {
319 return Some(v);
320 }
321
322 if flags.has_any(Alloc::NoWait | Alloc::NoVm) {
323 break;
324 }
325 }
326 }
327
328 None
329 }
330}
331
332struct ZoneState<T> {
334 kegs: LinkedList<Arc<UmaKeg<T>>>, full_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, free_buckets: VecDeque<UmaBox<UmaBucket<[BucketItem]>>>, alloc_count: u64, free_count: u64, count: usize, }
341
/// Classification of a zone, derived from its name in `UmaZone::new`.
///
/// The mbuf-related variants get fixed bucket item counts and take a
/// separate (currently unimplemented) refill path in `UmaZone::alloc`.
#[derive(Clone, Copy)]
enum ZoneType {
    /// Any zone without special handling.
    Other,
    /// Zone named "mbuf_packet".
    MbufPacket,
    /// Zone named "mbuf_jumbo_page".
    MbufJumboPage,
    /// Zone named "mbuf".
    Mbuf,
    /// Zone named "mbuf_cluster".
    MbufCluster,
    /// Zone named "mbuf_cluster_pack".
    MbufClusterPack,
}
357
358#[derive(Default)]
360struct UmaCache {
361 alloc: Option<UmaBox<UmaBucket<[BucketItem]>>>, free: Option<UmaBox<UmaBucket<[BucketItem]>>>, allocs: u64, frees: u64, }