1pub use self::object::*;
2pub use self::page::*;
3
4use self::phys::PhysAllocator;
5use self::stats::VmStats;
6use crate::config::{PAGE_SHIFT, PAGE_SIZE};
7use crate::context::{config, current_thread};
8use crate::dmem::Dmem;
9use crate::lock::GutexGroup;
10use crate::proc::Proc;
11use alloc::sync::{Arc, Weak};
12use alloc::vec::Vec;
13use core::cmp::max;
14use core::fmt::Debug;
15use core::sync::atomic::{AtomicUsize, Ordering};
16use krt::info;
17use macros::bitflag;
18use thiserror::Error;
19
20mod object;
21mod page;
22mod phys;
23mod stats;
24
25pub struct Vm {
27 phys: PhysAllocator,
28 pages: Vec<Arc<VmPage>>, stats: [VmStats; 2],
30 pagers: [Weak<Proc>; 2], pages_deficit: [AtomicUsize; 2], }
33
impl Vm {
    /// Initializes the virtual-memory subsystem.
    ///
    /// `phys_avail` describes usable physical memory as flat `(start, end)`
    /// address pairs; a pair whose `end` is zero terminates the list (the same
    /// convention as FreeBSD's `phys_avail`). Every page in those ranges is
    /// registered in the page array, accounted to one of the two VMs, and
    /// initially-free pages are handed to the buddy allocator through
    /// [`Self::free_page()`].
    ///
    /// # Errors
    /// Never fails at the moment; [`VmError`] has no variants yet.
    pub fn new(
        phys_avail: [u64; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let phys = PhysAllocator::new(&phys_avail, ma);

        let config = config();
        let blocked = config.env("vm.blacklist");
        // Pages in [unk, dmem.game_end()) are accounted to VM 1 and start out
        // allocated; everything else belongs to VM 0 and is freed below.
        // NOTE(review): looks like this carves the FMEM region out of the end
        // of game memory — confirm against Dmem.
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut pages = Vec::new();
        let mut free_pages = Vec::new();
        let mut page_count = [0; 2]; // Per-VM total pages.
        let mut free_count = [0; 2]; // Per-VM initially-free pages.

        // Walk the (start, end) pairs until the zero terminator.
        for i in (0..).step_by(2) {
            let addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
                // Page blacklisting via the "vm.blacklist" kernel environment
                // variable is not implemented yet.
                if blocked.is_some() {
                    let pi = pages.len();

                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));

                    todo!();
                }

                let vm;
                let free = if addr < unk || addr >= dmem.game_end() {
                    vm = 0;

                    page_count[0] += 1;
                    free_count[0] += 1;

                    true
                } else {
                    vm = 1;

                    page_count[1] += 1;

                    false
                };

                let pi = pages.len();
                // addr comes from a range PhysAllocator was built from, so it
                // must fall inside one of its segments.
                let seg = phys.segment_index(addr).unwrap();
                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));

                if free {
                    // Deferred: pages can only be freed once `Self` exists,
                    // because free_page needs the segment free queues.
                    free_pages.push(page.clone());
                }

                pages.push(page);
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

        // Reserve for the pageout machinery plus a safety margin on VM 0.
        // NOTE(review): constants presumably mirror the original kernel's
        // v_free_reserved computation — confirm.
        let pageout_page_count = 0x10;
        let gg = GutexGroup::new();
        let stats = [
            VmStats {
                free_reserved: pageout_page_count + 100 + 10,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[0]),
                interrupt_free_min: gg.clone().spawn(2),
            },
            VmStats {
                free_reserved: pageout_page_count,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[1]),
                interrupt_free_min: gg.clone().spawn(2),
            },
        ];

        let mut vm = Self {
            phys,
            pages,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        // Seed the buddy free queues with every page that starts out free.
        for page in free_pages {
            vm.free_page(&page, 0);
        }

        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

    /// Allocates a single physical page, optionally backed by `obj`.
    ///
    /// Returns [`None`] when available memory is at or below the reserve and
    /// the caller is not allowed to dip into it; in that case the pager is
    /// woken to reclaim memory. Several paths are still unimplemented
    /// (`todo!`): allocating for an object, interrupt-time allocation, cached
    /// pages, wiring, and the final return path.
    pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let stats = &self.stats[vm];
        let cache_count = stats.cache_count.read();
        let mut free_count = stats.free_count.write();
        let available = *free_count + *cache_count;

        if available <= stats.free_reserved {
            let p = td.proc();
            // The pager process itself may always dip into the reserve;
            // everyone else keeps only their Interrupt/System privilege bits.
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            // Interrupt takes precedence when both bits are somehow set.
            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= *stats.interrupt_free_min.read() {
                    // Even System callers fail below the interrupt reserve;
                    // record the shortfall and ask the pager to reclaim.
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release the stat locks before waking the pager.
                    drop(free_count);
                    drop(cache_count);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

        let page = match &obj {
            Some(_) => todo!(),
            None => {
                // A cached page cannot be used without an object to look it
                // up in.
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        // NOTE(review): panics if the physical allocator has no page even
        // though the counters said some were available — likely a missing
        // retry/reclaim path.
        let page = page.unwrap();
        let mut pf = page.flags().lock();

        match pf.has_any(PageFlags::Cached) {
            true => todo!(),
            false => *free_count -= 1,
        }

        match pf.has_any(PageFlags::Zero) {
            true => todo!(),
            false => *pf = PageFlags::zeroed(),
        }

        drop(pf);

        *page.access().lock() = PageAccess::zeroed();

        let mut oflags = PageExtFlags::zeroed();

        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        // Pages start out busy unless the caller opted out (NoBusy) or has no
        // object (NoObj).
        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        *page.extended_flags().lock() = oflags;

        if flags.has_any(VmAlloc::Wired) {
            todo!();
        }

        todo!()
    }

    /// Returns `page` to the buddy free queues, starting as a block of
    /// `order` pages (2^order).
    ///
    /// Repeatedly merges the block with its free buddy of equal order into
    /// the next-larger block (up to order 12), then inserts the resulting
    /// block into the matching per-VM/per-pool free queue.
    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
        let mut page = page;
        let vm = page.vm();
        let mut pa = page.addr();
        // NOTE(review): bit 0 of unk1 apparently selects a different segment
        // lookup — not implemented yet.
        let seg = if (page.unk1() & 1) == 0 {
            self.phys.segment(page.segment())
        } else {
            todo!()
        };

        let mut queues = seg.free_queues.lock();
        let mut po = page.order().lock();
        let mut pp = page.pool().lock();

        while order < 12 {
            let start = seg.start;
            // The buddy of a block differs from it only in bit
            // (order + PAGE_SHIFT) of its physical address.
            let buddy_pa = pa ^ (1u64 << (order + PAGE_SHIFT));

            if buddy_pa < start || buddy_pa >= seg.end {
                break;
            }

            let i = (buddy_pa - start) >> PAGE_SHIFT;
            let buddy = &self.pages[seg.first_page + usize::try_from(i).unwrap()];
            let mut bo = buddy.order().lock();

            // Stop unless the buddy is a free block of the same order, in the
            // same VM and the same unk1 class.
            if *bo != order || buddy.vm() != vm || ((page.unk1() ^ buddy.unk1()) & 1) != 0 {
                break;
            }

            let bp = buddy.pool().lock();

            // Detach the buddy from its free queue and fold it into our block.
            queues[vm][*bp][*bo].shift_remove(buddy);
            *bo = VmPage::FREE_ORDER;

            if *bp != *pp {
                todo!()
            }

            drop(bp);
            drop(bo);

            // Double the block and continue from its aligned head page.
            order += 1;
            pa &= !((1u64 << (order + PAGE_SHIFT)) - 1);
            page =
                &self.pages[seg.first_page + usize::try_from((pa - start) >> PAGE_SHIFT).unwrap()];
            po = page.order().lock();
            pp = page.pool().lock();
        }

        *po = order;
        queues[vm][*pp][order].insert(page.clone());
    }

    /// Spawns the pager processes for both VMs. Currently a no-op
    /// placeholder; `self.pagers` stays empty until this is implemented.
    fn spawn_pagers(&mut self) {
    }

    /// Wakes the pager for the given VM so it can reclaim memory.
    /// Not implemented yet.
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}
347
348pub struct MemAffinity {}
350
/// Flags for [`Vm::alloc_page()`].
///
/// NOTE(review): the bit values appear to line up with FreeBSD's
/// `VM_ALLOC_*` constants — confirm before adding variants.
#[bitflag(u32)]
pub enum VmAlloc {
    /// Caller may dip into the interrupt reserve.
    Interrupt = 0x00000001,
    /// Caller may dip into the system reserve.
    System = 0x00000002,
    /// Wire the allocated page (path unimplemented in `alloc_page`).
    Wired = 0x00000020,
    /// Allocation is not associated with an object.
    NoObj = 0x00000100,
    /// Do not mark the allocated page busy.
    NoBusy = 0x00000200,
    /// Allow returning a cached page (requires an object).
    Cached = 0x00000400,
    /// Requested page count, stored in the upper 16 bits; used to size the
    /// pager deficit on failure.
    Count(u16) = 0xFFFF0000,
}
369
/// Errors that can occur when initializing the VM subsystem via
/// [`Vm::new()`]. Currently empty: initialization cannot fail yet.
#[derive(Debug, Error)]
pub enum VmError {}