1pub use self::object::*;
2pub use self::page::*;
3
4use self::stats::VmStats;
5use crate::MemoryInfo;
6use crate::context::current_thread;
7use crate::lock::GutexGroup;
8use crate::proc::Proc;
9use alloc::sync::{Arc, Weak};
10use core::cmp::max;
11use core::fmt::Debug;
12use core::sync::atomic::{AtomicUsize, Ordering};
13use macros::bitflag;
14use thiserror::Error;
15
16mod object;
17mod page;
18mod stats;
19
/// Virtual-memory manager.
///
/// Owns the boot-time memory layout handed over in [`MemoryInfo`] plus the
/// per-pool page-accounting state used by [`Vm::alloc_page()`]. All of the
/// two-element arrays below are indexed by the same pool index (0 or 1) that
/// `VmObject::vm()` and `Proc::pager()` return — TODO confirm the meaning of
/// the two pools against those definitions.
pub struct Vm {
    boot_area: u64,               // physical address of the boot area (from MemoryInfo)
    boot_addr: u64,               // address of the boot info block
    boot_tables: u64,             // address of the boot page tables
    initial_memory_size: u64,     // memory size reported at boot
    end_page: u64,                // last usable page number reported at boot
    stats: [VmStats; 2],          // per-pool free/cache counters guarded by gutexes
    pagers: [Weak<Proc>; 2],      // pager process for each pool; empty until spawn_pagers()
    pages_deficit: [AtomicUsize; 2], // pages still owed to each pool; drained by the pager
}
31
impl Vm {
    /// Creates the VM manager from the boot-time [`MemoryInfo`].
    ///
    /// Builds the two per-pool [`VmStats`] entries (pool 0 gets an extra
    /// `100 + 10` reserved pages on top of the pageout reservation — the
    /// constants mirror the original system this is ported from; TODO confirm
    /// their derivation), then spawns the pager threads before publishing the
    /// `Arc`.
    ///
    /// # Safety
    /// Marked `unsafe` by the author; the required caller invariant is not
    /// visible from this file. NOTE(review): presumably `mi` must describe
    /// real, exclusively-owned physical memory and this must be called once
    /// during early boot — confirm against the caller.
    ///
    /// # Errors
    /// Returns [`VmError`], which currently has no variants, so this cannot
    /// fail yet.
    pub unsafe fn new(mi: &MemoryInfo) -> Result<Arc<Self>, VmError> {
        // Number of pages reserved for the pageout daemon (0x10 = 16).
        let pageout_page_count = 0x10;
        // One gutex group shares a single underlying lock across all of the
        // counters spawned from it below.
        let gg = GutexGroup::new();
        let stats = [
            // Pool 0: larger reservation than pool 1.
            VmStats {
                free_reserved: pageout_page_count + 100 + 10,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn_default(),
                interrupt_free_min: gg.clone().spawn(2),
            },
            // Pool 1: only the pageout reservation.
            VmStats {
                free_reserved: pageout_page_count,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn_default(),
                interrupt_free_min: gg.clone().spawn(2),
            },
        ];

        let mut vm = Self {
            boot_area: mi.boot_area,
            boot_addr: mi.boot_info.addr,
            boot_tables: mi.boot_info.page_tables,
            initial_memory_size: mi.initial_memory_size,
            end_page: mi.end_page,
            stats,
            // No pagers yet; spawn_pagers() below is expected to fill these.
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        // Must run before the Arc is created so the pagers can be stored
        // through &mut self.
        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

    /// Physical address of the boot area.
    pub fn boot_area(&self) -> u64 {
        self.boot_area
    }

    /// Address of the boot info block.
    pub fn boot_addr(&self) -> u64 {
        self.boot_addr
    }

    /// Address of the boot page tables.
    pub fn boot_tables(&self) -> u64 {
        self.boot_tables
    }

    /// Memory size reported at boot.
    pub fn initial_memory_size(&self) -> u64 {
        self.initial_memory_size
    }

    /// Last usable page reported at boot.
    pub fn end_page(&self) -> u64 {
        self.end_page
    }

    /// Allocates a page, optionally on behalf of `obj`.
    ///
    /// NOTE(review): the shape of this function (reserved-pool check, the
    /// Interrupt/System flag masking, waking the pager and recording a
    /// deficit) closely follows FreeBSD's `vm_page_alloc` — confirm against
    /// the reference implementation. Returns `None` when the request cannot
    /// be satisfied without dipping below the pool's reserve. Unfinished:
    /// several branches are still `todo!()`.
    pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
        // Pool index: taken from the object, defaulting to pool 0.
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let stats = &self.stats[vm];
        // Both counters come from the same gutex group, so these two reads
        // are guarded by the same underlying lock.
        let cache_count = stats.cache_count.read();
        let free_count = stats.free_count.read();
        let available = *free_count + *cache_count;

        if available <= stats.free_reserved {
            let p = td.proc();
            // The pool's own pager process is always treated as a System
            // allocation; anyone else keeps only the Interrupt/System bits.
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            // Interrupt wins when both Interrupt and System were requested.
            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= *stats.interrupt_free_min.read() {
                    // Requested page count (Count occupies the high 16 bits);
                    // at least one page is owed.
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release the gutex guards before waking the pager so it
                    // can take the same locks.
                    drop(free_count);
                    drop(cache_count);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                // Non-privileged allocation below the reserve: not yet ported.
                todo!()
            }
        }

        // Fast path (pages available above the reserve): not yet ported.
        todo!()
    }

    /// Spawns the pager processes for both pools.
    ///
    /// Currently a stub: `self.pagers` stays empty until this is implemented,
    /// so the pager-pointer comparison in `alloc_page` never matches.
    fn spawn_pagers(&mut self) {
    }

    /// Wakes the pager for the given pool so it can reclaim pages.
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}
169
/// Flags for [`Vm::alloc_page()`].
///
/// NOTE(review): the values match FreeBSD's `VM_ALLOC_INTERRUPT` /
/// `VM_ALLOC_SYSTEM` allocation classes — confirm against the reference.
#[bitflag(u32)]
pub enum VmAlloc {
    /// Allocation from interrupt context; may dip deepest into the reserve.
    Interrupt = 0x00000001,
    /// Allocation on behalf of the system (e.g. the pager itself).
    System = 0x00000002,
    /// Number of pages being requested, packed into the high 16 bits
    /// (read back via `flags.get(VmAlloc::Count)` in `alloc_page`).
    Count(u16) = 0xFFFF0000,
}
180
/// Represents an error when [`Vm::new()`] fails.
///
/// Currently has no variants; construction cannot fail yet. Variants will be
/// added as the `todo!()` paths in [`Vm`] are implemented.
#[derive(Debug, Error)]
pub enum VmError {}