1pub use self::object::*;
2pub use self::page::*;
3
4use self::stats::VmStats;
5use crate::config::{Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
6use crate::context::{current_arch, current_config, current_thread};
7use crate::lock::GutexGroup;
8use crate::proc::Proc;
9use alloc::sync::{Arc, Weak};
10use config::{BootEnv, MapType};
11use core::cmp::{max, min};
12use core::fmt::Debug;
13use core::sync::atomic::{AtomicUsize, Ordering};
14use krt::{boot_env, warn};
15use macros::bitflag;
16use thiserror::Error;
17
18mod object;
19mod page;
20mod stats;
21
22pub struct Vm {
24 boot_area: u64, boot_addr: u64, boot_tables: u64, initial_memory_size: u64, end_page: u64, stats: [VmStats; 2],
30 pagers: [Weak<Proc>; 2], pages_deficit: [AtomicUsize; 2], }
33
34impl Vm {
35 pub fn new() -> Result<Arc<Self>, VmError> {
42 let pageout_page_count = 0x10; let gg = GutexGroup::new();
46 let stats = [
47 VmStats {
48 free_reserved: pageout_page_count + 100 + 10, cache_count: gg.clone().spawn_default(),
50 free_count: gg.clone().spawn_default(),
51 interrupt_free_min: gg.clone().spawn(2),
52 },
53 VmStats {
54 #[allow(clippy::identity_op)]
55 free_reserved: pageout_page_count + 0, cache_count: gg.clone().spawn_default(),
57 free_count: gg.clone().spawn_default(),
58 interrupt_free_min: gg.clone().spawn(2),
59 },
60 ];
61
62 let mut vm = Self {
65 boot_area: 0,
66 boot_addr: 0,
67 boot_tables: 0,
68 initial_memory_size: 0,
69 end_page: 0,
70 stats,
71 pagers: Default::default(),
72 pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
73 };
74
75 vm.load_memory_map()?;
76
77 vm.spawn_pagers();
80
81 Ok(Arc::new(vm))
82 }
83
84 pub fn boot_area(&self) -> u64 {
85 self.boot_area
86 }
87
88 pub fn initial_memory_size(&self) -> u64 {
89 self.initial_memory_size
90 }
91
92 pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
99 let vm = obj.as_ref().map_or(0, |v| v.vm());
100 let td = current_thread();
101 let stats = &self.stats[vm];
102 let cache_count = stats.cache_count.read();
103 let free_count = stats.free_count.read();
104 let available = *free_count + *cache_count;
105
106 if available <= stats.free_reserved {
107 let p = td.proc();
108 let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
109 VmAlloc::System.into()
110 } else {
111 flags & (VmAlloc::Interrupt | VmAlloc::System)
112 };
113
114 if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
115 flags = VmAlloc::Interrupt.into();
116 }
117
118 if flags == VmAlloc::Interrupt {
119 todo!()
120 } else if flags == VmAlloc::System {
121 if available <= *stats.interrupt_free_min.read() {
122 let deficit = max(1, flags.get(VmAlloc::Count));
123
124 drop(free_count);
125 drop(cache_count);
126
127 self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
128 self.wake_pager(vm);
129
130 return None;
131 }
132 } else {
133 todo!()
134 }
135 }
136
137 todo!()
138 }
139
140 fn load_memory_map(&mut self) -> Result<(), VmError> {
147 let mut physmap = [0u64; 60];
149 let mut last = 0usize;
150 let map = match boot_env() {
151 BootEnv::Vm(v) => v.memory_map.as_slice(),
152 };
153
154 'top: for m in map {
155 match m.ty {
157 MapType::None => break,
158 MapType::Ram => (),
159 MapType::Reserved => continue,
160 }
161
162 if m.len == 0 {
164 break;
165 }
166
167 let mut insert_idx = last + 2;
169 let mut j = 0usize;
170
171 while j <= last {
172 if m.base < physmap[j + 1] {
173 if m.base + m.len > physmap[j] {
175 warn!("Overlapping memory regions, ignoring second region.");
176 continue 'top;
177 }
178
179 insert_idx = j;
180 break;
181 }
182
183 j += 2;
184 }
185
186 if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
189 physmap[insert_idx] = m.base;
190 continue;
191 }
192
193 if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
196 physmap[insert_idx - 1] = m.base + m.len;
197 continue;
198 }
199
200 last += 2;
201
202 if last == physmap.len() {
203 warn!("Too many segments in the physical address map, giving up.");
204 break;
205 }
206
207 #[allow(clippy::while_immutable_condition)]
210 while insert_idx < last {
211 todo!()
212 }
213
214 physmap[insert_idx] = m.base;
215 physmap[insert_idx + 1] = m.base + m.len;
216 }
217
218 if physmap[1] == 0 {
222 return Err(VmError::NoMemoryMap);
223 }
224
225 let page_size = PAGE_SIZE.get().try_into().unwrap();
227 let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
228
229 for i in (0..=last).step_by(2) {
230 if physmap[i] == 0 {
232 self.boot_area = physmap[i + 1] / 1024;
234 }
235
236 let start = physmap[i].next_multiple_of(page_size);
238 let end = physmap[i + 1] & page_mask;
239
240 self.initial_memory_size += end.saturating_sub(start);
241 }
242
243 if self.boot_area == 0 {
244 return Err(VmError::NoBootArea);
245 }
246
247 physmap[1] = self.adjust_boot_area(physmap[1] / 1024);
250
251 self.end_page = physmap[last + 1] >> PAGE_SHIFT;
253
254 if let Some(v) = current_config().env("hw.physmem") {
255 self.end_page = min(v.parse::<u64>().unwrap() >> PAGE_SHIFT, self.end_page);
256 }
257
258 self.load_pmap();
260
261 Ok(())
262 }
263
264 fn spawn_pagers(&mut self) {
271 }
274
275 fn wake_pager(&self, _: usize) {
282 todo!()
283 }
284
285 fn adjust_boot_area(&mut self, original: u64) -> u64 {
292 let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
294 let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
295 let need = u64::try_from(current_arch().secondary_start.len()).unwrap();
296 let addr = (original * 1024) & page_mask;
297
298 self.boot_addr = if need <= ((original * 1024) & 0xC00) {
300 addr
301 } else {
302 addr - page_size
303 };
304
305 self.boot_tables = self.boot_addr - (page_size * 3);
306 self.boot_tables
307 }
308
309 fn load_pmap(&mut self) {
316 let config = current_config();
317
318 if config.is_allow_disabling_aslr() && config.dipsw(Dipsw::DisabledKaslr) {
319 todo!()
320 } else {
321 }
324 }
325}
326
/// Flags for [`Vm::alloc_page()`].
#[bitflag(u32)]
pub enum VmAlloc {
    /// Allocation from interrupt context; takes priority over `System` when
    /// both bits are set (see `alloc_page`).
    Interrupt = 0x00000001,
    /// System allocation, permitted to dip further into the page reserve
    /// (down to the pool's `interrupt_free_min`).
    System = 0x00000002,
    /// Number of pages requested, carried in the upper 16 bits; used to size
    /// the pager deficit when the allocation cannot be satisfied.
    Count(u16) = 0xFFFF0000,
}
337
/// Errors that can occur while constructing a [`Vm`].
#[derive(Debug, Error)]
pub enum VmError {
    /// The boot environment's memory map contained no usable RAM region.
    #[error("no memory map provided to the kernel")]
    NoMemoryMap,

    /// No RAM region starting at physical address 0 was found, so there is
    /// nowhere to place the boot area.
    #[error("no boot area provided to the kernel")]
    NoBootArea,
}