use super::arch::small_alloc;
use super::slab::{Free, RcFree, Slab};
use super::{Alloc, Uma, UmaFlags};
use crate::config::{PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
use crate::lock::Mutex;
use crate::vm::Vm;
use alloc::collections::vec_deque::VecDeque;
use alloc::sync::Arc;
use core::alloc::Layout;
use core::cmp::{max, min};
use core::mem::MaybeUninit;
use core::num::NonZero;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicU32, Ordering};

/// Backing store for one or more UMA zones. This corresponds to the `uma_keg`
/// structure in FreeBSD, which this allocator is ported from.
pub struct UmaKeg {
    vm: Arc<Vm>,
    size: NonZero<usize>,             // FreeBSD `uk_size`.
    pgoff: usize,                     // FreeBSD `uk_pgoff`.
    ppera: usize,                     // FreeBSD `uk_ppera` (pages per allocation).
    ipers: usize,                     // FreeBSD `uk_ipers` (items per slab).
    alloc: fn(&Vm, Alloc) -> *mut u8, // FreeBSD `uk_allocf`.
    init: Option<fn()>,               // FreeBSD `uk_init`.
    max_pages: usize,                 // FreeBSD `uk_maxpages`.
    recurse: AtomicU32,               // FreeBSD `uk_recurse`.
    flags: UmaFlags,
    state: Mutex<KegState>,
}

impl UmaKeg {
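    /// Creates the keg, computing the slab layout (pages per allocation and
    /// items per slab) up front the same way FreeBSD's `keg_ctor` does.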
    pub(super) fn new(
        vm: Arc<Vm>,
        size: NonZero<usize>,
        align: usize,
        init: Option<fn()>,
        mut flags: UmaFlags,
    ) -> Self {
        if flags.has_any(UmaFlags::Vm) {
            todo!()
        }

        if flags.has_any(UmaFlags::ZInit) {
            todo!()
        }

        if flags.has_any(UmaFlags::Malloc | UmaFlags::RefCnt) {
            flags |= UmaFlags::VToSlab;
        }

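        // Compute the slab header layout: the `Slab` bookkeeping extended
        // with one free-list entry (`RcFree` for reference-counted kegs).
        // Subtracting the entry offset yields the per-item bookkeeping size
        // (`free_item`); the rest of the page is available for items.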
        let hdr = Layout::new::<Slab<()>>();
        let (mut hdr, off) = if flags.has_any(UmaFlags::RefCnt) {
            hdr.extend(Layout::new::<RcFree>()).unwrap()
        } else {
            hdr.extend(Layout::new::<Free>()).unwrap()
        };

        hdr = hdr.pad_to_align();

        let free_item = hdr.size() - off;
        let available = PAGE_SIZE.get() - hdr.size();

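        // Derive pages per allocation (ppera) and items per slab (ipers).
        // The three cases below correspond to FreeBSD's keg_cachespread_init,
        // keg_large_init and keg_small_init.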
        let (ppera, ipers) = if flags.has_any(UmaFlags::CacheSpread) {
            // Round the item size up to the next alignment boundary.
            let rsize = if (size.get() & align) == 0 {
                size.get()
            } else {
                (size.get() & !align) + align + 1
            };

            // If the rounded size is an even multiple of the alignment unit,
            // stretch it by one unit so consecutive items start on different
            // boundaries.
            let align = align + 1;
            let rsize = if (rsize & align) == 0 {
                rsize + align
            } else {
                rsize
            };

            let pages = (PAGE_SIZE.get() / align * rsize) >> PAGE_SHIFT;
            let ppera = min(pages, (128 * 1024) / PAGE_SIZE);
            let ipers = (ppera * PAGE_SIZE.get() + (rsize - size.get())) / rsize;

            (ppera, ipers)
        } else {
            if (size.get() + free_item) > available {
                // The item does not fit in a page alongside the slab header,
                // so keep the header off-page.
                if !flags.has_any(UmaFlags::Internal) {
                    flags |= UmaFlags::Offpage;

                    if !flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Hash;
                    }
                }

                // One item per slab, spanning as many pages as the size needs.
                let mut ppera = size.get() >> PAGE_SHIFT;

                if size.get() > (size.get() & !PAGE_MASK.get()) {
                    ppera += 1;
                }

                (ppera, 1)
            } else {
                // Small items: round the size up to the alignment, then pack
                // as many as possible into a single page.
                let rsize = max(size, Uma::SMALLEST_UNIT);
                let rsize = if (align & rsize.get()) == 0 {
                    rsize.get()
                } else {
                    align + 1 + (!align & rsize.get())
                };

                let mut ipers = available / (rsize + free_item);

                // If keeping the header on-page wastes too much space, move
                // it off-page so a full page's worth of items fits.
                if !flags.has_any(UmaFlags::Internal | UmaFlags::CacheOnly)
                    && (available % (rsize + free_item)) >= Uma::MAX_WASTE.get()
                    && (PAGE_SIZE.get() / rsize) > ipers
                {
                    ipers = PAGE_SIZE.get() / rsize;

                    if flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Offpage;
                    } else {
                        flags |= UmaFlags::Offpage | UmaFlags::Hash;
                    }
                }

                (1, ipers)
            }
        };

        if flags.has_any(UmaFlags::Offpage) {
            if flags.has_any(UmaFlags::RefCnt) {
                // FreeBSD selects `slabrefzone` as the off-page slab zone
                // here; that part has not been ported yet.
            } else {
                // FreeBSD selects `slabzone` as the off-page slab zone here;
                // that part has not been ported yet.
            }
        }

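        // Single-page slabs come from the arch-specific small_alloc;
        // anything larger goes through page_alloc.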
        let alloc = if ppera == 1 {
            small_alloc
        } else {
            Self::page_alloc
        };

        if flags.has_any(UmaFlags::MtxClass) {
            todo!()
        }

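        // For on-page slab headers, pgoff is the offset of the header within
        // the slab's pages, placing the header and free list after the items.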
        let mut pgoff = 0;

        if !flags.has_any(UmaFlags::Offpage) {
            let space = ppera * PAGE_SIZE.get();

            pgoff = (space - hdr.size()) - ipers * free_item;

            if space < pgoff + hdr.size() + ipers * free_item {
                panic!("UMA slab won't fit");
            }
        }

        if flags.has_any(UmaFlags::Hash) {
            todo!()
        }

        Self {
            vm,
            size,
            pgoff,
            ppera,
            ipers,
            alloc,
            init,
            max_pages: 0,
            recurse: AtomicU32::new(0),
            flags,
            state: Mutex::new(KegState {
                pages: 0,
                free: 0,
                partial_slabs: VecDeque::new(),
            }),
        }
    }

    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    pub fn item_per_slab(&self) -> usize {
        self.ipers
    }

    pub fn recurse(&self) -> u32 {
        self.recurse.load(Ordering::Relaxed)
    }

    pub fn flags(&self) -> UmaFlags {
        self.flags
    }

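    /// Fetches a slab that has at least one free item, allocating a new slab
    /// when none is available. This follows FreeBSD's `keg_fetch_slab`.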
    pub fn fetch_slab(self: &Arc<Self>, mut flags: Alloc) -> Option<NonNull<Slab<()>>> {
        let mut state = self.state.lock();

        while state.free == 0 {
            if flags.has_any(Alloc::NoVm) {
                return None;
            }

            // Wait for pages to be freed if the keg is at its page limit.
            #[allow(clippy::while_immutable_condition)]
            while self.max_pages != 0 && self.max_pages <= state.pages {
                todo!()
            }

            self.recurse.fetch_add(1, Ordering::Relaxed);
            let slab = self.alloc_slab(&mut state, flags);
            self.recurse.fetch_sub(1, Ordering::Relaxed);

            if let Some(slab) = NonNull::new(slab) {
                state.partial_slabs.push_front(slab);
                return Some(slab);
            }

            // The slab allocation failed. Retry with NoVm so the next
            // iteration bails out instead of hitting the VM again.
            flags |= Alloc::NoVm;
        }

        todo!()
    }

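    /// Allocates a new slab and initializes its embedded free list. This
    /// follows FreeBSD's `keg_alloc_slab`.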
    fn alloc_slab(self: &Arc<Self>, state: &mut KegState, flags: Alloc) -> *mut Slab<()> {
        let mut slab: *mut Slab<()>;

        if self.flags.has_any(UmaFlags::Offpage) {
            todo!()
        } else {
            // Malloc kegs zero items on demand, so strip the zero flag;
            // everything else gets zeroed pages.
            let flags = if self.flags.has_any(UmaFlags::Malloc) {
                flags & !Alloc::Zero
            } else {
                flags | Alloc::Zero
            };

            slab = (self.alloc)(&self.vm, flags).cast();

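            // On success, move the pointer to the slab header, which sits
            // pgoff bytes into the allocation for on-page kegs.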
            if !slab.is_null() {
                slab = unsafe { slab.byte_add(self.pgoff) };

                if self.flags.has_any(UmaFlags::VToSlab) && self.ppera != 0 {
                    todo!()
                }

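                // Initialize the free list: entry `i` stores the index of
                // item `i + 1`, forming a chain of free item indices.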
                if self.flags.has_any(UmaFlags::RefCnt) {
                    todo!()
                } else if self.ipers == 0 {
                    todo!()
                } else {
                    let slab = core::ptr::slice_from_raw_parts_mut(slab, self.ipers);
                    let slab = slab as *mut Slab<[MaybeUninit<Free>]>;

                    for (i, f) in unsafe { (*slab).free.iter_mut().enumerate() } {
                        f.write(Free {
                            item: (i + 1).try_into().unwrap(),
                        });
                    }
                }

                if self.init.is_some() {
                    todo!()
                }

                if self.flags.has_any(UmaFlags::Hash) {
                    todo!()
                }

                state.pages += self.ppera;
                state.free += self.ipers;

                return slab;
            }

            todo!()
        }
    }

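    /// Allocates slab pages from the kernel VM for multi-page slabs,
    /// equivalent to FreeBSD's `page_alloc`. Not ported yet.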
    fn page_alloc(_: &Vm, _: Alloc) -> *mut u8 {
        todo!()
    }
}

// SAFETY: the raw slab pointers in `KegState` are only touched while holding
// the keg mutex, so the keg can be sent to and shared between threads.
unsafe impl Send for UmaKeg {}
unsafe impl Sync for UmaKeg {}

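/// Mutable keg state. Guarded by the keg mutex; the fields correspond to
/// FreeBSD's `uk_pages`, `uk_free` and `uk_part_slab`.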
struct KegState {
    pages: usize,
    free: usize,
    partial_slabs: VecDeque<NonNull<Slab<()>>>,
}