pub use self::object::*;
pub use self::page::*;

use self::stats::VmStats;
use crate::config::PAGE_SIZE;
use crate::context::{config, current_thread};
use crate::dmem::Dmem;
use crate::lock::GutexGroup;
use crate::proc::Proc;
use alloc::sync::{Arc, Weak};
use core::cmp::max;
use core::fmt::Debug;
use core::sync::atomic::{AtomicUsize, Ordering};
use krt::info;
use macros::bitflag;
use thiserror::Error;

mod object;
mod page;
mod stats;

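/// Virtual memory manager, tracking per-pool page statistics, pager processes,
/// and outstanding page deficits.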
pub struct Vm {
    stats: [VmStats; 2],
    pagers: [Weak<Proc>; 2],
    pages_deficit: [AtomicUsize; 2],
}

impl Vm {
    pub fn new(phys_avail: [u64; 61], dmem: &Dmem) -> Result<Arc<Self>, VmError> {
        let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
        let config = config();
        let blocked = config.env("vm.blacklist");
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut page_count = [0; 2];
        let mut free_count = [0; 2];

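        // phys_avail is a list of (start, end) address pairs terminated by a
        // zero entry. Count the pages in each range, splitting them between
        // the two pools.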
        for i in (0..phys_avail.len() - 1).step_by(2) {
            let mut addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            while addr < end {
                if blocked.is_some() {
                    todo!();
                }

                if addr < unk || dmem.game_end() <= addr {
                    page_count[0] += 1;
                    free_count[0] += 1;
                } else {
                    // Pool 1 pages are counted but not added to the free
                    // count here.
                    page_count[1] += 1;
                }

                addr += page_size;
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

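        // Initialize per-pool statistics. Pool 0 reserves extra pages on top
        // of the pageout reservation.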
        let pageout_page_count = 0x10;
        let gg = GutexGroup::new();
        let stats = [
            VmStats {
                free_reserved: pageout_page_count + 100 + 10,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[0]),
                interrupt_free_min: gg.clone().spawn(2),
            },
            VmStats {
                free_reserved: pageout_page_count,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[1]),
                interrupt_free_min: gg.clone().spawn(2),
            },
        ];

        let mut vm = Self {
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

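    /// Attempts to allocate a page, returning [`None`] when the request
    /// cannot be satisfied.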
    pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let stats = &self.stats[vm];
        let cache_count = stats.cache_count.read();
        let free_count = stats.free_count.read();
        let available = *free_count + *cache_count;

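        // Below the reserve threshold only privileged requests may proceed.
        // The pager for this pool is always treated as a system allocation;
        // other callers are restricted to interrupt or system requests.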
        if available <= stats.free_reserved {
            let p = td.proc();
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= *stats.interrupt_free_min.read() {
                    let deficit = max(1, flags.get(VmAlloc::Count));

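                    // Release the stat locks, record the deficit, and wake
                    // the pager before failing this allocation.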
                    drop(free_count);
                    drop(cache_count);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

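        // Allocate the page. Without a backing object the page must come
        // straight from physical memory, so a cached page cannot be returned.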
        let page = match obj {
            Some(_) => todo!(),
            None => {
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.alloc_phys()
            }
        };

        match page.flags().has_any(PageFlags::Cached) {
            true => todo!(),
            false => todo!(),
        }
    }

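    /// Allocates a page directly from physical memory.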
    fn alloc_phys(&self) -> VmPage {
        todo!()
    }

    fn spawn_pagers(&mut self) {
        // TODO: Spawn a pager process for each pool.
    }

    fn wake_pager(&self, _: usize) {
        todo!()
    }
}

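/// Flags for [`Vm::alloc_page()`].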
#[bitflag(u32)]
pub enum VmAlloc {
    Interrupt = 0x00000001,
    System = 0x00000002,
    Cached = 0x00000400,
    /// Number of pages requested, stored in the upper 16 bits.
    Count(u16) = 0xFFFF0000,
}

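/// Error type for [`Vm::new()`].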
#[derive(Debug, Error)]
pub enum VmError {}