1pub use self::object::*;
2pub use self::page::*;
3
4use self::phys::PhysAllocator;
5use self::stats::VmStats;
6use crate::config::{PAGE_SHIFT, PAGE_SIZE};
7use crate::context::{config, current_thread};
8use crate::dmem::Dmem;
9use crate::lock::Mutex;
10use crate::proc::Proc;
11use alloc::sync::{Arc, Weak};
12use alloc::vec::Vec;
13use core::cmp::max;
14use core::fmt::Debug;
15use core::sync::atomic::{AtomicUsize, Ordering};
16use krt::info;
17use macros::bitflag;
18use thiserror::Error;
19
20#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
21#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
22mod arch;
23mod object;
24mod page;
25mod phys;
26mod stats;
27
28pub struct Vm {
30 phys: PhysAllocator,
31 pages: Vec<Arc<VmPage>>, stats: [Mutex<VmStats>; 2],
33 pagers: [Weak<Proc>; 2], pages_deficit: [AtomicUsize; 2], }
36
impl Vm {
    /// Creates the VM manager.
    ///
    /// `phys_avail` is a zero-terminated list of `(start, end)` physical
    /// address pairs describing usable RAM. Pages whose address falls in
    /// `[game_end - fmem_max, game_end)` are accounted to domain 1 and start
    /// out allocated; all other pages go to domain 0 and are released to the
    /// free queues before this function returns.
    ///
    /// # Panics
    /// Panics if an address from `phys_avail` is not covered by any segment
    /// of the physical allocator, or if the (unfinished) `vm.blacklist`
    /// handling is reached.
    pub fn new(
        phys_avail: [usize; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let phys = PhysAllocator::new(&phys_avail, ma);

        let config = config();
        // Optional list of physical addresses to exclude. The exclusion logic
        // itself is still a stub (see the todo!() below).
        let blocked = config.env("vm.blacklist");
        // Start of the domain-1 region: the last `fmem_max` bytes before the
        // end of game memory.
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut pages = Vec::new();
        let mut free_pages = Vec::new();
        let mut page_count = [0; 2]; // total pages per domain
        let mut free_count = [0; 2]; // initially-free pages per domain

        // Walk the (start, end) pairs; a pair with end == 0 terminates the
        // list. NOTE(review): the loop is unbounded and relies on a zero
        // terminator being present within the 61 entries — confirm the
        // callers always provide one.
        for i in (0..).step_by(2) {
            let addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            // Create one VmPage per PAGE_SIZE step in [addr, end).
            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
                if blocked.is_some() {
                    // Blacklist support is unfinished: the page is created but
                    // never classified or counted.
                    let pi = pages.len();

                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));

                    todo!();
                }

                let vm;
                // Classify the page: outside [unk, game_end) -> domain 0 and
                // immediately free; inside -> domain 1, kept allocated.
                let free = if addr < unk || addr >= dmem.game_end() {
                    vm = 0;

                    page_count[0] += 1;
                    free_count[0] += 1;

                    true
                } else {
                    vm = 1;

                    page_count[1] += 1;

                    false
                };

                let pi = pages.len();
                // Addresses come from phys_avail, which also built the
                // allocator, so a segment must exist for them.
                let seg = phys.segment_index(addr).unwrap();
                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));

                if free {
                    free_pages.push(page.clone());
                }

                pages.push(page);
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

        // Reserve and minimum thresholds per domain. NOTE(review): the magic
        // constants below (0x10, 100 + 10, 325, 64, 2049, 6145) presumably
        // mirror the original kernel's pageout tuning — confirm against it.
        let pageout_page_count = 0x10;
        let free_reserved = [pageout_page_count + 100 + 10, pageout_page_count];
        let free_min = [free_reserved[0] + 325, free_reserved[1] + 64];
        let stats = [
            Mutex::new(VmStats {
                free_reserved: free_reserved[0],
                // cache_min scales with how much memory the domain starts
                // with: none when small, 2x or 4x free_min otherwise.
                cache_min: if free_count[0] < 2049 {
                    0
                } else if free_count[0] < 6145 {
                    free_reserved[0] + free_min[0] * 2
                } else {
                    free_reserved[0] + free_min[0] * 4
                },
                cache_count: 0,
                free_count: free_count[0],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
            Mutex::new(VmStats {
                free_reserved: free_reserved[1],
                cache_min: if free_count[1] < 2049 {
                    0
                } else if free_count[1] < 6145 {
                    free_reserved[1] + free_min[1] * 2
                } else {
                    free_reserved[1] + free_min[1] * 4
                },
                cache_count: 0,
                free_count: free_count[1],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
        ];

        let mut vm = Self {
            phys,
            pages,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        // Release the domain-0 pages into the buddy free queues, coalescing
        // as they go in.
        for page in free_pages {
            vm.free_page(&page, 0);
        }

        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

    /// Allocates a single page, optionally to back `obj` at `pindex`.
    ///
    /// Returns [`None`] when free memory is at or below the domain's reserve
    /// and the request is not privileged enough to dip into it; in that case
    /// the domain's page deficit is increased and its pager is woken.
    ///
    /// # Panics
    /// Panics (`unwrap`) if the physical allocator returns no page, and via
    /// `todo!()` on several unimplemented paths (object-backed allocation,
    /// interrupt-reserve allocation, cached pages, low-memory wakeup).
    pub fn alloc_page(
        &self,
        obj: Option<VmObject>,
        pindex: usize,
        flags: VmAlloc,
    ) -> Option<Arc<VmPage>> {
        // Domain comes from the object; objectless allocations use domain 0.
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let mut stats = self.stats[vm].lock();
        let available = stats.free_count + stats.cache_count;

        // Low-memory policy: decide whether this request may dip into the
        // reserve.
        if available <= stats.free_reserved {
            let p = td.proc();
            // A pager process is always treated as a System allocation;
            // everyone else keeps only their Interrupt/System bits. Note this
            // `flags` shadows the parameter only within this block — the
            // outer `flags` is still used for Cached/NoBusy/Wired below.
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            // Interrupt+System together collapses to Interrupt alone.
            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                // System requests fail only once even the interrupt reserve
                // would be consumed; record the shortfall and wake the pager.
                if available <= stats.interrupt_free_min {
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release the stats lock before touching the pager.
                    drop(stats);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                // Non-privileged allocation under the reserve.
                todo!()
            }
        }

        let page = match &obj {
            Some(_) => todo!(),
            None => {
                // Objectless allocations never come from the cache.
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                // Pool index is `obj.is_none() as usize` == 1 here.
                // NOTE(review): the meaning of the pool index is defined by
                // PhysAllocator — confirm 1 is the unmanaged pool.
                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        // NOTE(review): assumes the reserve checks above guarantee a page is
        // available here — confirm, otherwise this panics.
        let page = page.unwrap();
        let mut ps = page.state.lock();

        // Account the page against the right counter.
        match ps.flags.has_any(PageFlags::Cached) {
            true => todo!(),
            false => stats.free_count -= 1,
        }

        match ps.flags.has_any(PageFlags::Zero) {
            true => todo!(),
            false => ps.flags = PageFlags::zeroed(),
        }

        ps.access = PageAccess::zeroed();

        let mut oflags = PageExtFlags::zeroed();

        // Pages without a backing object are unmanaged.
        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        // Freshly allocated pages start busy unless the caller opted out.
        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        ps.extended_flags = oflags;

        if flags.has_any(VmAlloc::Wired) {
            stats.wire_count += 1;
            ps.wire_count = 1;
        }

        ps.act_count = 0;

        match &obj {
            Some(_) => todo!(),
            None => ps.pindex = pindex,
        }

        // Falling below cache_min + free_reserved should trigger reclaim
        // (unimplemented).
        if (stats.cache_count + stats.free_count) < (stats.cache_min + stats.free_reserved) {
            todo!()
        }

        drop(ps);

        Some(page)
    }

    /// Returns `page` to its segment's free queues, coalescing it with free
    /// buddies.
    ///
    /// `order` is the order the page is freed at (0 = a single page). Merging
    /// proceeds while `order < 12`, so the largest resulting block is order 12.
    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
        let mut page = page;
        let vm = page.vm;
        let mut pa = page.addr;
        // The low bit of unk1 selects how the segment is found; only the
        // direct-segment case is implemented.
        let seg = if (page.unk1 & 1) == 0 {
            self.phys.segment(page.segment)
        } else {
            todo!()
        };

        // Lock order: segment free queues first, then individual page states.
        let mut queues = seg.free_queues.lock();
        let mut ps = page.state.lock();

        while order < 12 {
            let start = seg.start;
            // The buddy of a 2^order-page block differs from it in exactly
            // bit (order + PAGE_SHIFT) of the physical address.
            let buddy_pa = pa ^ (1usize << (order + PAGE_SHIFT));

            // The buddy must lie within the same segment.
            if buddy_pa < start || buddy_pa >= seg.end {
                break;
            }

            let buddy = &self.pages[seg.first_page + ((buddy_pa - start) >> PAGE_SHIFT)];
            let mut bs = buddy.state.lock();

            // Merge only with a buddy that is free at the same order, belongs
            // to the same domain, and has a matching unk1 low bit.
            if bs.order != order || buddy.vm != vm || ((page.unk1 ^ buddy.unk1) & 1) != 0 {
                break;
            }

            // Detach the buddy from its free queue and mark it as absorbed.
            queues[vm][bs.pool][bs.order].shift_remove(buddy);
            bs.order = VmPage::FREE_ORDER;

            if bs.pool != ps.pool {
                todo!()
            }

            drop(bs);

            // Move up one order; the merged block's base is pa with the new
            // order's low bits cleared. NOTE(review): the previous page-state
            // guard is released only after the next one is acquired (the old
            // `ps` drops on reassignment) — confirm this cannot deadlock.
            order += 1;
            pa &= !((1usize << (order + PAGE_SHIFT)) - 1);
            page = &self.pages[seg.first_page + ((pa - start) >> PAGE_SHIFT)];
            ps = page.state.lock();
        }

        // Queue the (possibly merged) block at its final order.
        ps.order = order;
        queues[vm][ps.pool][order].insert(page.clone());
    }

    /// Spawns the pager processes for both domains.
    ///
    /// NOTE(review): currently a no-op stub; `pagers` stays unset.
    fn spawn_pagers(&mut self) {
    }

    /// Wakes the pager for the given domain so it can reclaim pages.
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}
380
381pub struct MemAffinity {}
383
/// Flags for [`Vm::alloc_page()`].
#[bitflag(u32)]
pub enum VmAlloc {
    /// Allocation on behalf of an interrupt; may dip into the interrupt
    /// reserve (that path is currently unimplemented).
    Interrupt = 0x00000001,
    /// Privileged (system) allocation; allowed below `free_reserved` as long
    /// as `interrupt_free_min` pages remain.
    System = 0x00000002,
    /// Wire the page immediately (sets `wire_count` to 1).
    Wired = 0x00000020,
    /// Allocation has no backing object; suppresses the initial Busy flag.
    NoObj = 0x00000100,
    /// Do not mark the new page busy.
    NoBusy = 0x00000200,
    /// Allow a cached page; objectless allocations with this flag fail.
    Cached = 0x00000400,
    /// Number of pages the caller ultimately wants (upper 16 bits); used to
    /// size the pager deficit when the request cannot be satisfied.
    Count(u16) = 0xFFFF0000,
}
402
/// Errors that [`Vm::new()`] can return.
///
/// NOTE(review): no variants yet — construction currently cannot fail.
#[derive(Debug, Error)]
pub enum VmError {}