use super::arch::small_alloc;
use super::{Alloc, FreeItem, Slab, SlabHdr, Uma, UmaFlags};
use crate::config::{PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
use crate::lock::Mutex;
use crate::vm::Vm;
use alloc::collections::vec_deque::VecDeque;
use alloc::sync::Arc;
use core::alloc::Layout;
use core::cmp::{max, min};
use core::num::NonZero;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicU32, Ordering};

/// Implementation of the `uma_keg` structure. Field comments give the corresponding
/// FreeBSD `uma_keg` member names.
pub struct UmaKeg<T> {
    vm: Arc<Vm>,
    size: NonZero<usize>,             // uk_size
    rsize: usize,                     // uk_rsize
    pgoff: usize,                     // uk_pgoff
    ppera: usize,                     // uk_ppera
    ipers: usize,                     // uk_ipers
    alloc: fn(&Vm, Alloc) -> *mut u8, // uk_allocf
    init: Option<fn()>,               // uk_init
    max_pages: usize,                 // uk_maxpages
    recurse: AtomicU32,               // uk_recurse
    flags: UmaFlags,                  // uk_flags
    state: Mutex<KegState<T>>,
}

impl<T: FreeItem> UmaKeg<T> {
    /// See `keg_ctor` in FreeBSD for a reference.
    pub(super) fn new(
        vm: Arc<Vm>,
        size: NonZero<usize>,
        align: usize,
        init: Option<fn()>,
        mut flags: UmaFlags,
    ) -> Self {
        if flags.has_any(UmaFlags::Vm) {
            todo!()
        }

        if flags.has_any(UmaFlags::ZInit) {
            todo!()
        }

        if flags.has_any(UmaFlags::Malloc) {
            // Malloc-backed kegs need to map an address back to its slab on free.
            flags |= UmaFlags::VToSlab;
        }

        flags |= T::flags();

        // Compute the layout of a slab header followed by one free item. `off` is where
        // the free-item array starts and `free_item` is the per-item stride.
        let hdr = Layout::new::<SlabHdr<T>>();
        let (mut hdr, off) = hdr.extend(Layout::new::<T>()).unwrap();

        hdr = hdr.pad_to_align();

        let free_item = hdr.size() - off;
        let available = PAGE_SIZE.get() - hdr.size();

        let (rsize, ppera, ipers) = if flags.has_any(UmaFlags::CacheSpread) {
            // `align` is a mask, so the real alignment is `align + 1`. Round the item
            // size up to that boundary.
            let rsize = size.get().next_multiple_of(align + 1);
            let align = align + 1;

            // If the rounded size is an even multiple of the alignment, grow it by one
            // alignment unit; otherwise every item would land on the same boundary.
            let rsize = if (rsize & align) == 0 {
                rsize + align
            } else {
                rsize
            };

            // Span enough pages for the items to cycle through all alignment
            // boundaries, capped at 128 KiB per slab.
            let pages = (PAGE_SIZE.get() / align * rsize) >> PAGE_SHIFT;
            let ppera = min(pages, (128 * 1024) / PAGE_SIZE);

            let ipers = (ppera * PAGE_SIZE.get() + (rsize - size.get())) / rsize;

            (rsize, ppera, ipers)
        } else {
            if (size.get() + free_item) > available {
                // The item does not fit in a page together with the slab header, so the
                // header must live outside the slab.
                if !flags.has_any(UmaFlags::Internal) {
                    flags |= UmaFlags::Offpage;

                    if !flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Hash;
                    }
                }

                // One item per slab, using as many whole pages as the item needs.
                let mut ppera = size.get() >> PAGE_SHIFT;

                if size.get() > (size.get() & !PAGE_MASK.get()) {
                    ppera += 1;
                }

                (size.get(), ppera, 1)
            } else {
                // The item fits in a page alongside the header.
                let rsize = max(size, Uma::SMALLEST_UNIT);
                let rsize = rsize.get().next_multiple_of(align + 1);

                let mut ipers = available / (rsize + free_item);

                // If the in-page header wastes too much space and dropping it lets more
                // items fit, move the header off-page instead.
                if !flags.has_any(UmaFlags::Internal | UmaFlags::CacheOnly)
                    && (available % (rsize + free_item)) >= Uma::MAX_WASTE.get()
                    && (PAGE_SIZE.get() / rsize) > ipers
                {
                    ipers = PAGE_SIZE.get() / rsize;

                    if flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Offpage;
                    } else {
                        flags |= UmaFlags::Offpage | UmaFlags::Hash;
                    }
                }

                (rsize, 1, ipers)
            }
        };
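
        // A worked example of the in-page path above, assuming 0x4000-byte pages, a
        // 0x40-byte combined header layout (`hdr.size()`) and a 16-byte `free_item`
        // stride (all illustrative numbers): a 0x100-byte item with an alignment mask
        // of 0xF keeps rsize = 0x100, so
        //
        //     available = 0x4000 - 0x40        = 16320
        //     ipers     = 16320 / (0x100 + 16) = 60
        //
        // i.e. 60 items per single-page slab with no wasted space, so the header stays
        // inside the slab.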

        if flags.has_any(UmaFlags::Offpage) {
            // FreeBSD would pick a slab zone for the off-page header here; not
            // implemented yet.
        }

        // Single-page slabs can be served by the arch-specific small allocator;
        // multi-page slabs go through the page allocator.
        let alloc = if ppera == 1 {
            small_alloc
        } else {
            Self::page_alloc
        };

        if flags.has_any(UmaFlags::MtxClass) {
            todo!()
        }

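        // For on-page kegs the slab memory is laid out with the items first and the
        // header (plus the per-item free entries) at the end, `pgoff` bytes in:
        //
        //   mem                     mem + pgoff
        //   |                       |
        //   v                       v
        //   +-----------------------+---------+--------+-----+--------------+
        //   | items (ipers * rsize) | SlabHdr | free 0 | ... | free ipers-1 |
        //   +-----------------------+---------+--------+-----+--------------+
        //
        //   |<--------------------- ppera * PAGE_SIZE --------------------->|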
        let mut pgoff = 0;

        if !flags.has_any(UmaFlags::Offpage) {
            let space = ppera * PAGE_SIZE.get();

            pgoff = (space - hdr.size()) - ipers * free_item;

            if space < pgoff + hdr.size() + ipers * free_item {
                panic!("UMA slab won't fit");
            }
        }

        if flags.has_any(UmaFlags::Hash) {
            todo!()
        }

        Self {
            vm,
            size,
            rsize,
            pgoff,
            ppera,
            ipers,
            alloc,
            init,
            max_pages: 0,
            recurse: AtomicU32::new(0),
            flags,
            state: Mutex::new(KegState {
                pages: 0,
                free: 0,
                partial_slabs: VecDeque::new(),
            }),
        }
    }
}
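
// A construction sketch (illustrative only: `Free` stands in for any `FreeItem`
// implementation and the argument values are made up):
//
//     let keg = UmaKeg::<Free>::new(vm, NonZero::new(0x100).unwrap(), 0xF, None, UmaFlags::Malloc);
//
// This mirrors FreeBSD's `keg_ctor`: it derives `rsize`, `ppera` and `ipers` from the
// item size and decides whether the slab header lives inside or outside the slab pages.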

impl<T> UmaKeg<T> {
    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    pub fn allocated_size(&self) -> usize {
        self.rsize
    }

    pub fn item_per_slab(&self) -> usize {
        self.ipers
    }

    pub fn recurse(&self) -> u32 {
        self.recurse.load(Ordering::Relaxed)
    }

    pub fn flags(&self) -> UmaFlags {
        self.flags
    }

    /// See `page_alloc` in FreeBSD for a reference.
    fn page_alloc(_: &Vm, _: Alloc) -> *mut u8 {
        todo!()
    }
}

impl<T: FreeItem> UmaKeg<T> {
    /// See `keg_fetch_slab` in FreeBSD for a reference.
    pub fn fetch_slab(self: &Arc<Self>, mut flags: Alloc) -> Option<NonNull<Slab<T>>> {
        let mut state = self.state.lock();

        while state.free == 0 {
            if flags.has_any(Alloc::NoVm) {
                return None;
            }

            #[allow(clippy::while_immutable_condition)]
            while self.max_pages != 0 && self.max_pages <= state.pages {
                todo!()
            }

            // Count recursion so allocations triggered while refilling this keg can be
            // detected.
            self.recurse.fetch_add(1, Ordering::Relaxed);
            let slab = self.alloc_slab(&mut state, flags);
            self.recurse.fetch_sub(1, Ordering::Relaxed);

            if let Some(slab) = NonNull::new(slab) {
                state.partial_slabs.push_front(slab);
                return Some(slab);
            }

            // Don't hit the VM again if the allocation just failed.
            flags |= Alloc::NoVm;
        }

        todo!()
    }

    /// See `keg_alloc_slab` in FreeBSD for a reference.
    fn alloc_slab(self: &Arc<Self>, state: &mut KegState<T>, flags: Alloc) -> *mut Slab<T> {
        if self.flags.has_any(UmaFlags::Offpage) {
            todo!()
        } else {
            // Malloc-backed kegs leave zeroing to the caller; everything else always
            // gets zeroed memory.
            let flags = if self.flags.has_any(UmaFlags::Malloc) {
                flags & !Alloc::Zero
            } else {
                flags | Alloc::Zero
            };

            let mem = (self.alloc)(&self.vm, flags);

            if !mem.is_null() {
                // The header lives `pgoff` bytes into the slab, after the items.
                let hdr = unsafe { mem.byte_add(self.pgoff).cast::<SlabHdr<T>>() };

                if self.flags.has_any(UmaFlags::VToSlab) && self.ppera != 0 {
                    todo!()
                }

                let v = SlabHdr {
                    keg: self.clone(),
                    free_count: self.ipers,
                    first_free: 0,
                    items: mem,
                };

                unsafe { hdr.write(v) };

                // The free-item array starts right after the header, at the same offset
                // used by the layout computed in `new()`.
                let (_, off) = Layout::new::<SlabHdr<T>>()
                    .extend(Layout::new::<T>())
                    .unwrap();
                let free = unsafe { hdr.byte_add(off).cast::<T>() };

                for i in 0..self.ipers {
                    let item = T::new(i);

                    unsafe { free.add(i).write(item) };
                }

                if self.init.is_some() {
                    todo!()
                }

                if self.flags.has_any(UmaFlags::Hash) {
                    todo!()
                }

                state.pages += self.ppera;
                state.free += self.ipers;

                return core::ptr::slice_from_raw_parts_mut(hdr, self.ipers) as *mut Slab<T>;
            }

            todo!()
        }
    }
}
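
// The fat-pointer cast at the end of `alloc_slab` assumes `Slab<T>` is a slice-based DST,
// along the lines of (a sketch, not necessarily the actual definition):
//
//     pub struct Slab<T> {
//         hdr: SlabHdr<T>,
//         free: [T],
//     }
//
// A `*mut [SlabHdr<T>]` built with `slice_from_raw_parts_mut(hdr, ipers)` carries `ipers`
// as its pointer metadata, and the `as` cast re-tags it as `*mut Slab<T>` while keeping
// that length for the trailing slice.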

struct KegState<T> {
    pages: usize,                              // uk_pages
    free: usize,                               // uk_free
    partial_slabs: VecDeque<NonNull<Slab<T>>>, // uk_part_slab
}

// The raw slab pointers keep `KegState` from being `Send` automatically; they are only
// ever accessed with the keg's mutex held.
unsafe impl<T: Send> Send for KegState<T> {}