use super::arch::small_alloc;
use super::slab::{Free, RcFree, Slab};
use super::{Alloc, Uma, UmaFlags, UmaZone};
use crate::config::{PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
use crate::vm::Vm;
use alloc::sync::Arc;
use core::alloc::Layout;
use core::cmp::{max, min};
use core::num::NonZero;

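/// Backing store for a zone: carves runs of pages into slabs of fixed-size
/// items. This appears to be a port of the `uma_keg` structure from the
/// FreeBSD-derived Orbis kernel.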
pub struct UmaKeg {
    vm: Arc<Vm>,
    size: NonZero<usize>,
    ipers: usize,
    alloc: fn(&Vm, Alloc),
    max_pages: u32,
    pages: u32,
    free: u32,
    recurse: u32,
    flags: UmaFlags,
}

impl UmaKeg {
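    /// Creates a keg for items of `size` bytes aligned to `align + 1` bytes
    /// (`align` is an alignment mask). This appears to follow FreeBSD's
    /// `keg_ctor`: it picks the slab geometry (pages per slab and items per
    /// slab) and the page allocator for the keg.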
    pub(super) fn new(
        vm: Arc<Vm>,
        size: NonZero<usize>,
        align: usize,
        mut flags: UmaFlags,
    ) -> Self {
        if flags.has_any(UmaFlags::Vm) {
            todo!()
        }

        if flags.has_any(UmaFlags::ZInit) {
            todo!()
        }

        // Malloc and reference-counted kegs free items without a slab
        // pointer, so they need the virtual-address-to-slab lookup.
        if flags.has_any(UmaFlags::Malloc | UmaFlags::RefCnt) {
            flags |= UmaFlags::VToSlab;
        }
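
        // Compute the slab header layout: a `Slab` followed by per-item
        // bookkeeping entries (`RcFree` for reference-counted kegs, `Free`
        // otherwise). `free_item` below is the stride of one such entry and
        // `available` is the page space left after the header.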
        let hdr = Layout::new::<Slab<()>>();

        let (mut hdr, off) = if flags.has_any(UmaFlags::RefCnt) {
            hdr.extend(Layout::new::<RcFree>()).unwrap()
        } else {
            hdr.extend(Layout::new::<Free>()).unwrap()
        };

        hdr = hdr.pad_to_align();

        let free_item = hdr.size() - off;
        let available = PAGE_SIZE.get() - hdr.size();

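        // Pick the slab geometry: `ppera` is pages per slab and `ipers` is
        // items per slab. Cache-spread kegs pad items so they start on every
        // alignment boundary across a multi-page slab.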
        let (ppera, ipers) = if flags.has_any(UmaFlags::CacheSpread) {
            // Round the item size up to the next alignment boundary.
            let rsize = if (size.get() & align) == 0 {
                size.get()
            } else {
                (size.get() & !align) + align + 1
            };

            // If the rounded size is an even multiple of the alignment,
            // extend it by one alignment unit; otherwise every item would
            // start on the same boundary, defeating the spread.
            let align = align + 1;
            let rsize = if (rsize & align) == 0 {
                rsize + align
            } else {
                rsize
            };

            // Pages per slab, capped at 128 KiB.
            let pages = (PAGE_SIZE.get() / align * rsize) >> PAGE_SHIFT;
            let ppera = min(pages, (128 * 1024) / PAGE_SIZE);

            let ipers = (ppera * PAGE_SIZE.get() + (rsize - size.get())) / rsize;

            (ppera, ipers)
        } else {
            // An item plus its bookkeeping entry does not fit in a single
            // page, so each slab spans multiple pages and holds one item.
            if (size.get() + free_item) > available {
                // Move the slab header off-page so the item can use the
                // whole run of pages.
                if !flags.has_any(UmaFlags::Internal) {
                    flags |= UmaFlags::Offpage;

                    if !flags.has_any(UmaFlags::VToSlab) {
                        flags |= UmaFlags::Hash;
                    }
                }

                // Pages required to back one item, rounded up.
                let mut ppera = size.get() >> PAGE_SHIFT;

                if size.get() > (size.get() & !PAGE_MASK.get()) {
                    ppera += 1;
                }

                (ppera, 1)
            } else {
                // Round the item size up to the next alignment boundary.
                let rsize = max(size, Uma::SMALLEST_UNIT);
                let rsize = if (align & rsize.get()) == 0 {
                    rsize.get()
                } else {
                    align + 1 + (!align & rsize.get())
                };

                let ipers = available / (rsize + free_item);

                // If more than MAX_WASTE bytes of the page would be wasted
                // and an off-page header would fit more items, the header
                // should move off-page. Not implemented yet.
                if !flags.has_any(UmaFlags::Internal | UmaFlags::CacheOnly)
                    && (available % (rsize + free_item)) >= Uma::MAX_WASTE.get()
                    && (PAGE_SIZE.get() / rsize) > ipers
                {
                    todo!()
                }

                (1, ipers)
            }
        };

        if flags.has_any(UmaFlags::Offpage) {
            if flags.has_any(UmaFlags::RefCnt) {
                // TODO: FreeBSD selects the reference-counted slab-header
                // zone (`slabrefzone`) here.
            } else {
                // TODO: FreeBSD selects the plain slab-header zone
                // (`slabzone`) here.
            }
        }

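        // Single-page slabs are served by `small_alloc`; multi-page slabs go
        // through the page allocator.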
        let alloc = if ppera == 1 {
            small_alloc
        } else {
            Self::page_alloc
        };

        if flags.has_any(UmaFlags::MtxClass) {
            todo!()
        }

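        // Sanity check for slabs with an inline header: the header plus the
        // per-item bookkeeping entries must fit in the slab's pages.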
        if !flags.has_any(UmaFlags::Offpage) {
            let space = ppera * PAGE_SIZE.get();
            let pgoff = (space - hdr.size()) - ipers * free_item;

            if space < pgoff + hdr.size() + ipers * free_item {
                panic!("UMA slab won't fit");
            }
        }

        if flags.has_any(UmaFlags::Hash) {
            todo!()
        }

        Self {
            vm,
            size,
            ipers,
            alloc,
            max_pages: 0,
            pages: 0,
            free: 0,
            recurse: 0,
            flags,
        }
    }

    pub fn size(&self) -> NonZero<usize> {
        self.size
    }

    pub fn item_per_slab(&self) -> usize {
        self.ipers
    }

    pub fn recurse(&self) -> u32 {
        self.recurse
    }

    pub fn flags(&self) -> UmaFlags {
        self.flags
    }

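    /// Fetches a slab with at least one free item, allocating a new slab
    /// when none is available. This appears to follow FreeBSD's
    /// `keg_fetch_slab`; most of the paths are still unimplemented.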
    pub fn fetch_slab(&mut self, _: &UmaZone, flags: Alloc) -> Option<()> {
        while self.free == 0 {
            if flags.has_any(Alloc::NoVm) {
                return None;
            }

            // Wait for pages to be released if the keg is at its page limit.
            #[allow(clippy::while_immutable_condition)]
            while self.max_pages != 0 && self.max_pages <= self.pages {
                todo!()
            }

            self.recurse += 1;
            self.alloc_slab(flags);
            self.recurse -= 1;

            todo!()
        }

        todo!()
    }

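    /// Allocates backing pages for a new slab. This appears to follow
    /// FreeBSD's `keg_alloc_slab`; slab header construction is still
    /// unimplemented.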
    fn alloc_slab(&self, flags: Alloc) {
        if self.flags.has_any(UmaFlags::Offpage) {
            todo!()
        } else {
            // Malloc kegs skip zeroing here; every other keg always requests
            // zeroed pages.
            let flags = if self.flags.has_any(UmaFlags::Malloc) {
                flags & !Alloc::Zero
            } else {
                flags | Alloc::Zero
            };

            (self.alloc)(&self.vm, flags);
            todo!()
        }
    }

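    /// Allocator for multi-page slabs, presumably corresponding to FreeBSD's
    /// `page_alloc`. Not implemented yet.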
    fn page_alloc(_: &Vm, _: Alloc) {
        todo!()
    }
}