pub use self::object::*;
pub use self::page::*;

use self::phys::PhysAllocator;
use self::stats::VmStats;
use crate::config::{PAGE_SHIFT, PAGE_SIZE};
use crate::context::{config, current_thread};
use crate::dmem::Dmem;
use crate::lock::Mutex;
use crate::proc::Proc;
use alloc::sync::{Arc, Weak};
use alloc::vec::Vec;
use core::cmp::max;
use core::fmt::Debug;
use core::sync::atomic::{AtomicUsize, Ordering};
use krt::info;
use macros::bitflag;
use thiserror::Error;

mod object;
mod page;
mod phys;
mod stats;

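/// Manager for physical memory: owns every [`VmPage`], the per-pool statistics
/// and the pager bookkeeping used when a pool runs low.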
pub struct Vm {
    phys: PhysAllocator,
    /// Every physical page; a page's position in this vector is its page index.
    pages: Vec<Arc<VmPage>>,
    /// Per-pool statistics (index 0 = normal memory, index 1 = the game region).
    stats: [Mutex<VmStats>; 2],
    /// Pager process for each pool.
    pagers: [Weak<Proc>; 2],
    /// Pages that could not be allocated, per pool; recorded before waking the pager.
    pages_deficit: [AtomicUsize; 2],
}

impl Vm {
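    /// Creates the VM system: builds a [`VmPage`] for every page described by
    /// `phys_avail`, seeds the per-pool statistics and releases the initially
    /// free pages into the physical allocator's free queues.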
    pub fn new(
        phys_avail: [u64; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let phys = PhysAllocator::new(&phys_avail, ma);

        let config = config();
        let blocked = config.env("vm.blacklist");
        // Start of the game-memory window: pages in [unk, dmem.game_end()) are
        // assigned to pool 1 below.
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut pages = Vec::new();
        let mut free_pages = Vec::new();
        let mut page_count = [0; 2];
        let mut free_count = [0; 2];

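        // phys_avail lists usable physical memory as (start, end) pairs terminated
        // by a zero entry.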
        for i in (0..).step_by(2) {
            let addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

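            // Create a VmPage for every page in the range. Handling of the
            // `vm.blacklist` environment variable is not implemented yet.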
            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
                if blocked.is_some() {
                    let pi = pages.len();

                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));

                    todo!();
                }

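                // Pages outside [unk, game_end) go to pool 0 and start out free;
                // pages inside that window are assigned to pool 1 and are not
                // released to the free queues here.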
                let vm;
                let free = if addr < unk || addr >= dmem.game_end() {
                    vm = 0;

                    page_count[0] += 1;
                    free_count[0] += 1;

                    true
                } else {
                    vm = 1;

                    page_count[1] += 1;

                    false
                };

                let pi = pages.len();
                let seg = phys.segment_index(addr).unwrap();
                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));

                if free {
                    free_pages.push(page.clone());
                }

                pages.push(page);
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

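        // Reserve a handful of pages per pool for the pagers and derive the
        // cache_min watermark from how much memory the pool started with.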
        let pageout_page_count = 0x10;
        let free_reserved = [pageout_page_count + 100 + 10, pageout_page_count];
        let free_min = [free_reserved[0] + 325, free_reserved[1] + 64];
        let stats = [
            Mutex::new(VmStats {
                free_reserved: free_reserved[0],
                cache_min: if free_count[0] < 2049 {
                    0
                } else if free_count[0] < 6145 {
                    free_reserved[0] + free_min[0] * 2
                } else {
                    free_reserved[0] + free_min[0] * 4
                },
                cache_count: 0,
                free_count: free_count[0],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
            Mutex::new(VmStats {
                free_reserved: free_reserved[1],
                cache_min: if free_count[1] < 2049 {
                    0
                } else if free_count[1] < 6145 {
                    free_reserved[1] + free_min[1] * 2
                } else {
                    free_reserved[1] + free_min[1] * 4
                },
                cache_count: 0,
                free_count: free_count[1],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
        ];

        let mut vm = Self {
            phys,
            pages,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        // Release the pool-0 pages gathered above into the buddy free queues.
        for page in free_pages {
            vm.free_page(&page, 0);
        }

        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

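    /// Allocates a single physical page from the pool that `obj` belongs to
    /// (pool 0 when no object is given). Returns [`None`] when the pool is too
    /// low on memory for the requested allocation class.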
    pub fn alloc_page(
        &self,
        obj: Option<VmObject>,
        pindex: usize,
        flags: VmAlloc,
    ) -> Option<Arc<VmPage>> {
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let mut stats = self.stats[vm].lock();
        let available = stats.free_count + stats.cache_count;

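        // The pool is at or below its reserve: only interrupt/system requests may
        // keep digging into it. A system request that would exhaust the interrupt
        // reserve records a deficit, wakes the pager and fails instead.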
        if available <= stats.free_reserved {
            let p = td.proc();
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= stats.interrupt_free_min {
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    drop(stats);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

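        // Grab a page from the physical allocator. The object-backed path is not
        // implemented yet.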
        let page = match &obj {
            Some(_) => todo!(),
            None => {
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        let page = page.unwrap();
        let mut ps = page.state.lock();

        match ps.flags.has_any(PageFlags::Cached) {
            true => todo!(),
            false => stats.free_count -= 1,
        }

        match ps.flags.has_any(PageFlags::Zero) {
            true => todo!(),
            false => ps.flags = PageFlags::zeroed(),
        }

        ps.access = PageAccess::zeroed();

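        // A page allocated without a backing object is unmanaged; it also starts
        // out busy unless `NoBusy` or `NoObj` was requested.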
        let mut oflags = PageExtFlags::zeroed();

        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        ps.extended_flags = oflags;

        if flags.has_any(VmAlloc::Wired) {
            stats.wire_count += 1;
            ps.wire_count = 1;
        }

        ps.act_count = 0;

        match &obj {
            Some(_) => todo!(),
            None => ps.pindex = pindex,
        }

        if (stats.cache_count + stats.free_count) < (stats.cache_min + stats.free_reserved) {
            todo!()
        }

        drop(ps);

        Some(page)
    }

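    /// Returns `page` to the free queues of its segment, first merging it with
    /// any free buddy blocks of the same order (up to order 12).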
    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
        let mut page = page;
        let vm = page.vm;
        let mut pa = page.addr;
        let seg = if (page.unk1 & 1) == 0 {
            self.phys.segment(page.segment)
        } else {
            todo!()
        };

        let mut queues = seg.free_queues.lock();
        let mut ps = page.state.lock();

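        // Buddy coalescing: while the neighbouring buddy block is free at the
        // current order and belongs to the same VM, pull it off its free queue
        // and merge, doubling the block size each iteration.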
        while order < 12 {
            let start = seg.start;
            let buddy_pa = pa ^ (1u64 << (order + PAGE_SHIFT));

            if buddy_pa < start || buddy_pa >= seg.end {
                break;
            }

            let i = (buddy_pa - start) >> PAGE_SHIFT;
            let buddy = &self.pages[seg.first_page + usize::try_from(i).unwrap()];
            let mut bs = buddy.state.lock();

            if bs.order != order || buddy.vm != vm || ((page.unk1 ^ buddy.unk1) & 1) != 0 {
                break;
            }

            queues[vm][bs.pool][bs.order].shift_remove(buddy);

            bs.order = VmPage::FREE_ORDER;

            if bs.pool != ps.pool {
                todo!()
            }

            drop(bs);

            order += 1;
            pa &= !((1u64 << (order + PAGE_SHIFT)) - 1);
            page =
                &self.pages[seg.first_page + usize::try_from((pa - start) >> PAGE_SHIFT).unwrap()];
            ps = page.state.lock();
        }

        ps.order = order;

        queues[vm][ps.pool][order].insert(page.clone());
    }

    fn spawn_pagers(&mut self) {
    }

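    /// Wakes the pager responsible for the given pool; not implemented yet.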
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}

pub struct MemAffinity {}

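/// Flags for [`Vm::alloc_page()`].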
#[bitflag(u32)]
pub enum VmAlloc {
    Interrupt = 0x00000001,
    System = 0x00000002,
    Wired = 0x00000020,
    NoObj = 0x00000100,
    NoBusy = 0x00000200,
    Cached = 0x00000400,
    /// Allocation count carried in the upper 16 bits; used to size the page
    /// deficit when an allocation fails.
    Count(u16) = 0xFFFF0000,
}

#[derive(Debug, Error)]
pub enum VmError {}