1use super::bucket::{BucketItem, UmaBucket};
2use super::keg::UmaKeg;
3use super::{Alloc, Uma, UmaBox, UmaFlags};
4use crate::context::{CpuLocal, current_thread};
5use crate::lock::{Gutex, GutexGroup, GutexWrite};
6use crate::vm::Vm;
7use alloc::collections::VecDeque;
8use alloc::collections::linked_list::LinkedList;
9use alloc::string::String;
10use alloc::sync::Arc;
11use alloc::vec::Vec;
12use core::cell::RefCell;
13use core::cmp::min;
14use core::num::NonZero;
15use core::ops::DerefMut;
16use core::ptr::null_mut;
17use core::sync::atomic::{AtomicBool, Ordering};
18
/// A UMA zone: a cache of fixed-size items backed by one or more [`UmaKeg`]s,
/// with per-CPU bucket caches in front of the keg layer.
pub struct UmaZone {
    /// Shared switch controlling whether new buckets may be allocated
    /// (read with `Ordering::Relaxed` in `alloc_bucket`).
    bucket_enable: Arc<AtomicBool>,
    /// Lookup table mapping a size-derived index to an index into
    /// `bucket_zones` (see `alloc_bucket`).
    bucket_keys: Arc<Vec<usize>>,
    /// The UMA-wide zones that buckets themselves are allocated from.
    bucket_zones: Arc<Vec<UmaZone>>,
    /// Special-cased zone kind; set from the zone name in `new`.
    ty: ZoneType,
    /// Item size of this zone. Taken from the keg, not the requested size
    /// (the keg may adjust it — TODO confirm against `UmaKeg::new`).
    size: NonZero<usize>,
    /// Kegs backing this zone; `fetch_slab` uses the front keg by default.
    kegs: Gutex<LinkedList<UmaKeg>>,
    /// Slab-fetching strategy; currently always `Self::fetch_slab`.
    slab: fn(&Self, Option<&mut UmaKeg>, Alloc) -> Option<()>,
    /// Per-CPU caches holding the fast-path alloc/free buckets.
    caches: CpuLocal<RefCell<UmaCache>>,
    /// Buckets that still contain items, ready to be handed to a CPU cache.
    full_buckets: Gutex<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>,
    /// Empty buckets available for reuse.
    free_buckets: Gutex<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>,
    /// Total allocations, accumulated from the per-CPU counters.
    alloc_count: Gutex<u64>,
    /// Total frees, accumulated from the per-CPU counters.
    free_count: Gutex<u64>,
    /// Current bucket item count target for this zone.
    count: Gutex<usize>,
    /// Zone flags (the keg-inherited subset is merged in by `new`).
    flags: UmaFlags,
}
36
impl UmaZone {
    /// Default alignment passed to the keg when the caller gives none.
    /// Expressed as a mask-style value (64-byte cache line minus one) —
    /// TODO confirm how `UmaKeg::new` interprets this.
    const ALIGN_CACHE: usize = 63;

    /// Creates a new zone.
    ///
    /// Either wraps the supplied `keg` or creates a fresh one from `vm`,
    /// `size` and `align`. `bucket_enable`, `bucket_keys` and `bucket_zones`
    /// are UMA-wide shared state used later by `alloc_bucket`. Secondary
    /// zones (sharing a keg with a primary zone) are not implemented yet.
    #[allow(clippy::too_many_arguments)]
    pub(super) fn new(
        vm: Arc<Vm>,
        bucket_enable: Arc<AtomicBool>,
        bucket_keys: Arc<Vec<usize>>,
        bucket_zones: Arc<Vec<UmaZone>>,
        name: impl Into<String>,
        keg: Option<UmaKeg>,
        size: NonZero<usize>,
        align: Option<usize>,
        flags: impl Into<UmaFlags>,
    ) -> Self {
        let name = name.into();
        let flags = flags.into();
        let (keg, mut flags) = if flags.has_any(UmaFlags::Secondary) {
            todo!()
        } else {
            // Use the caller's keg if given, otherwise create one for this zone.
            let keg = match keg {
                Some(v) => v,
                None => UmaKeg::new(vm, size, align.unwrap_or(Self::ALIGN_CACHE), flags),
            };

            // Zone flags restart from zero here; the keg-derived subset is
            // merged back in below via `inherit`.
            (keg, UmaFlags::zeroed())
        };

        let mut ty = ZoneType::Other;
        let mut count = 0;

        // Internal zones get no bucket caching (count stays 0).
        if !keg.flags().has_any(UmaFlags::Internal) {
            count = if !keg.flags().has_any(UmaFlags::MaxBucket) {
                min(keg.item_per_slab(), Uma::BUCKET_MAX)
            } else {
                Uma::BUCKET_MAX
            };

            // Well-known mbuf zones are special-cased by name with fixed
            // types and bucket sizes.
            match name.as_str() {
                "mbuf_packet" => {
                    ty = ZoneType::MbufPacket;
                    count = 4;
                }
                "mbuf_cluster_pack" => {
                    ty = ZoneType::MbufClusterPack;
                    count = Uma::BUCKET_MAX;
                }
                "mbuf_jumbo_page" => {
                    ty = ZoneType::MbufJumboPage;
                    count = 1;
                }
                "mbuf" => {
                    ty = ZoneType::Mbuf;
                    count = 16;
                }
                "mbuf_cluster" => {
                    ty = ZoneType::MbufCluster;
                    count = 1;
                }
                _ => (),
            }
        }

        let gg = GutexGroup::new();

        // Keg flags that propagate to the zone.
        let inherit = UmaFlags::Offpage
            | UmaFlags::Malloc
            | UmaFlags::Hash
            | UmaFlags::RefCnt
            | UmaFlags::VToSlab
            | UmaFlags::Bucket
            | UmaFlags::Internal
            | UmaFlags::CacheOnly;

        flags |= keg.flags() & inherit;

        Self {
            bucket_enable,
            bucket_keys,
            bucket_zones,
            ty,
            // The effective item size comes from the keg, not the `size`
            // argument.
            size: keg.size(),
            kegs: gg.clone().spawn(LinkedList::from([keg])),
            slab: Self::fetch_slab,
            caches: CpuLocal::new(|_| RefCell::default()),
            full_buckets: gg.clone().spawn_default(),
            free_buckets: gg.clone().spawn_default(),
            alloc_count: gg.clone().spawn_default(),
            free_count: gg.clone().spawn_default(),
            count: gg.spawn(count),
            flags,
        }
    }

    /// Returns the item size of this zone (as reported by its keg).
    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    /// Allocates one item from the zone.
    ///
    /// Panics if `Alloc::Wait` is requested from a context that is not
    /// allowed to sleep. Several slow paths are still `todo!()`.
    pub fn alloc(&self, flags: Alloc) -> *mut u8 {
        // A waitable allocation may sleep, which must not happen in a
        // non-sleeping context.
        if flags.has_any(Alloc::Wait) {
            let td = current_thread();

            if !td.can_sleep() {
                panic!("attempt to do waitable heap allocation in a non-sleeping context");
            }
        }

        loop {
            // Fast path: try this CPU's cache without taking any zone lock.
            let caches = self.caches.lock();
            let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());

            if !mem.is_null() {
                return mem;
            }

            // Release the CPU-local pin before taking zone locks.
            // NOTE(review): the lock order below is free_buckets -> count ->
            // caches; presumably it must stay consistent with the free path —
            // TODO confirm.
            drop(caches);

            let mut frees = self.free_buckets.write();
            let mut count = self.count.write();
            let caches = self.caches.lock();
            let mut cache = caches.borrow_mut();
            // Re-check the cache: it may have been refilled while we were
            // between the two lock acquisitions.
            let mem = Self::alloc_from_cache(&mut cache);

            if !mem.is_null() {
                return mem;
            }

            // Flush the per-CPU counters into the zone-wide totals.
            *self.alloc_count.write() += core::mem::take(&mut cache.allocs);
            *self.free_count.write() += core::mem::take(&mut cache.frees);

            // The alloc bucket is exhausted; recycle it.
            if let Some(b) = cache.alloc.take() {
                frees.push_front(b);
            }

            // Grab a full bucket for this CPU if one is available.
            if let Some(b) = self.full_buckets.write().pop_front() {
                cache.alloc = Some(b);

                let m = Self::alloc_from_cache(&mut cache);

                // The bucket came from the full list, so this must succeed.
                assert!(!m.is_null());

                return m;
            }

            drop(cache);
            drop(caches);

            // Mbuf-family zones (except cluster_pack) take a different slow
            // path; not implemented yet.
            if matches!(
                self.ty,
                ZoneType::MbufPacket
                    | ZoneType::MbufJumboPage
                    | ZoneType::Mbuf
                    | ZoneType::MbufCluster
            ) {
                if flags.has_any(Alloc::Wait) {
                    todo!()
                }

                todo!()
            }

            // Grow the bucket size target for ordinary zones, up to the cap.
            if !matches!(
                self.ty,
                ZoneType::MbufCluster
                    | ZoneType::Mbuf
                    | ZoneType::MbufJumboPage
                    | ZoneType::MbufPacket
                    | ZoneType::MbufClusterPack
            ) && *count < Uma::BUCKET_MAX
            {
                *count += 1;
            }

            // Try to refill a bucket; on failure fall back to allocating a
            // bare item, otherwise loop and retry the cache.
            if self.alloc_bucket(frees, count, flags) {
                return self.alloc_item(flags);
            }
        }
    }

    /// Tries to pop an item from the CPU cache `c`.
    ///
    /// Swaps in the free bucket when the alloc bucket is empty. Popping from
    /// a non-empty bucket is still `todo!()`, so currently this always falls
    /// through to returning null.
    fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
        while let Some(b) = &mut c.alloc {
            if b.len() != 0 {
                todo!()
            }

            // Alloc bucket is empty; if the free bucket has items, swap the
            // two and retry.
            if c.free.as_ref().is_some_and(|b| b.len() != 0) {
                core::mem::swap(&mut c.alloc, &mut c.free);
                continue;
            }

            break;
        }

        null_mut()
    }

    /// Attempts to produce a bucket of items for this zone.
    ///
    /// Both `frees` and `count` are lock guards taken by the caller
    /// ([`Self::alloc`]), preserving its lock order. Returns `true` when the
    /// caller should retry the allocation path.
    fn alloc_bucket(
        &self,
        frees: GutexWrite<VecDeque<UmaBox<UmaBucket<[BucketItem]>>>>,
        count: GutexWrite<usize>,
        flags: Alloc,
    ) -> bool {
        match frees.front() {
            // Reusing a recycled free bucket is not implemented yet.
            Some(_) => todo!(),
            None => {
                if self.bucket_enable.load(Ordering::Relaxed) {
                    // Never zero the bucket storage itself.
                    let mut flags = flags & !Alloc::Zero;

                    if self.flags.has_any(UmaFlags::CacheOnly) {
                        flags |= Alloc::NoVm;
                    }

                    // Pick the bucket zone whose bucket size fits `count`.
                    // NOTE(review): the `+ 15 >> BUCKET_SHIFT` rounding
                    // presumably matches the bucket-size granularity — TODO
                    // confirm against how `bucket_keys` is built.
                    let i = (*count + 15) >> Uma::BUCKET_SHIFT;
                    let k = self.bucket_keys[i];

                    self.bucket_zones[k].alloc_item(flags);

                    todo!()
                }
            }
        }

        true
    }

    /// Allocates a single item directly from a slab, bypassing the buckets.
    /// Still `todo!()` past the slab fetch.
    fn alloc_item(&self, flags: Alloc) -> *mut u8 {
        let slab = (self.slab)(self, None, flags);

        if slab.is_some() {
            todo!()
        }

        todo!()
    }

    /// Fetches a slab from `keg`, defaulting to this zone's first keg.
    ///
    /// For bucket kegs, bails out when the keg is already being entered
    /// recursively (`recurse() != 0`). Retries until a slab is obtained
    /// unless `NoWait` or `NoVm` is set.
    fn fetch_slab(&self, keg: Option<&mut UmaKeg>, flags: Alloc) -> Option<()> {
        let mut kegs = self.kegs.write();
        let keg = keg.unwrap_or(kegs.front_mut().unwrap());

        if !keg.flags().has_any(UmaFlags::Bucket) || keg.recurse() == 0 {
            loop {
                if let Some(v) = keg.fetch_slab(self, flags) {
                    return Some(v);
                }

                if flags.has_any(Alloc::NoWait | Alloc::NoVm) {
                    break;
                }
            }
        }

        None
    }
}
336
/// Kind of zone, derived from the zone name in [`UmaZone::new`]. The mbuf
/// variants select special-cased bucket counts and slow paths in
/// [`UmaZone::alloc`].
#[derive(Clone, Copy)]
enum ZoneType {
    /// Any zone not matched by name below.
    Other,
    /// Zone named `mbuf_packet`.
    MbufPacket,
    /// Zone named `mbuf_jumbo_page`.
    MbufJumboPage,
    /// Zone named `mbuf`.
    Mbuf,
    /// Zone named `mbuf_cluster`.
    MbufCluster,
    /// Zone named `mbuf_cluster_pack`.
    MbufClusterPack,
}
352
/// Per-CPU cache for a [`UmaZone`], accessed through `UmaZone::caches`.
#[derive(Default)]
struct UmaCache {
    /// Bucket currently used to satisfy allocations.
    alloc: Option<UmaBox<UmaBucket<[BucketItem]>>>,
    /// Bucket collecting freed items; swapped with `alloc` when it has items.
    free: Option<UmaBox<UmaBucket<[BucketItem]>>>,
    /// Allocations made from this CPU since the last flush into
    /// `UmaZone::alloc_count` (taken with `core::mem::take`).
    allocs: u64,
    /// Frees made from this CPU since the last flush into
    /// `UmaZone::free_count`.
    frees: u64,
}