obkrnl/vm/mod.rs

pub use self::object::*;
pub use self::page::*;

use self::phys::PhysAllocator;
use self::stats::VmStats;
use crate::config::PAGE_SIZE;
use crate::context::{config, current_thread};
use crate::dmem::Dmem;
use crate::lock::GutexGroup;
use crate::proc::Proc;
use alloc::sync::{Arc, Weak};
use core::cmp::max;
use core::fmt::Debug;
use core::sync::atomic::{AtomicUsize, Ordering};
use krt::info;
use macros::bitflag;
use thiserror::Error;

mod object;
mod page;
mod phys;
mod stats;

/// Implementation of the Virtual Memory system.
pub struct Vm {
    phys: PhysAllocator,
    stats: [VmStats; 2],
    pagers: [Weak<Proc>; 2],         // pageproc
    pages_deficit: [AtomicUsize; 2], // vm_pageout_deficit
}

impl Vm {
    /// See `vm_page_startup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x029200|
    pub fn new(
        phys_avail: [u64; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let mut phys = PhysAllocator::new(&phys_avail, ma);

        // Get initial v_page_count and v_free_count.
        let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
        let config = config();
        let blocked = config.env("vm.blacklist");
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut page_count = [0; 2];
        let mut free_count = [0; 2];

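        // phys_avail is a list of (start, end) physical address pairs terminated by an entry
        // whose end address is zero. Walk every range one page at a time and decide which of
        // the two page pools each page belongs to; the boundary appears to be derived from the
        // dmem configuration above (`unk`).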
        for i in (0..).step_by(2) {
            // Check for the end entry.
            let mut addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            while addr < end {
                if blocked.is_some() {
                    todo!();
                }

                phys.page_for(addr);

                if addr < unk || dmem.game_end() <= addr {
                    // We inline a call to vm_phys_add_page() here.
                    page_count[0] += 1;
                    free_count[0] += 1;
                } else {
                    // We inline a call to an unknown function here.
                    page_count[1] += 1;
                }

                addr += page_size;
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

        // Initialize stats. The Orbis initializes this data in the vm_pageout function, but it
        // is prone to a data race, so we do it here instead.
        let pageout_page_count = 0x10; // TODO: Figure out where this value comes from.
        let gg = GutexGroup::new();
        let stats = [
            VmStats {
                free_reserved: pageout_page_count + 100 + 10,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[0]),
                interrupt_free_min: gg.clone().spawn(2),
            },
            VmStats {
                free_reserved: pageout_page_count,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[1]),
                interrupt_free_min: gg.clone().spawn(2),
            },
        ];

        // Spawn page daemons. The Orbis does this in a separate sysinit, but we do it here
        // instead to keep it within the VM subsystem.
        let mut vm = Self {
            phys,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

    /// See `vm_page_alloc` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x02B030|
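    ///
    /// Returns [`None`] if the request cannot be satisfied right now (e.g. the pool is at its
    /// reserve and the caller is not allowed to dip into it). A hypothetical call for a page
    /// with no backing [`VmObject`] might look like the following sketch:
    ///
    /// ```ignore
    /// let page = vm.alloc_page(None, VmAlloc::System.into());
    /// ```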
    pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let stats = &self.stats[vm];
        let cache_count = stats.cache_count.read();
        let free_count = stats.free_count.read();
        let available = *free_count + *cache_count;

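        // When the pool is at or below its reserve, only interrupt and system allocations may
        // dip into the remaining pages. A system allocation that would also exhaust the
        // interrupt reserve records a deficit and wakes the page daemon instead of succeeding.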
        if available <= stats.free_reserved {
            let p = td.proc();
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

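            // If the caller asked for an interrupt allocation without the system flag, treat
            // the request purely as an interrupt allocation.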
            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= *stats.interrupt_free_min.read() {
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    drop(free_count);
                    drop(cache_count);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

        // Allocate VmPage.
        let page = match &obj {
            Some(_) => todo!(),
            None => {
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

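                // No backing object: take a page directly from the physical allocator.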
                self.phys.alloc_page(vm, obj.is_none().into(), 0)
            }
        };

        // TODO: The Orbis assumes the page is never null here.
        let page = page.unwrap();

        match page.flags().has_any(PageFlags::Cached) {
            true => todo!(),
            false => todo!(),
        }
    }

    /// See `kick_pagedaemons` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0E40|
    fn spawn_pagers(&mut self) {
        // TODO: This requires v_page_count, which is populated by vm_page_startup. In order to
        // populate that we need phys_avail, which is populated by getmemsize.
    }

    /// See `pagedaemon_wakeup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0690|
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}

/// Implementation of the `mem_affinity` structure.
pub struct MemAffinity {}

/// Flags for [`Vm::alloc_page()`].
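///
/// The `bitflag` macro generates the bitwise operators and accessors used in this module; a
/// rough sketch of the usage patterns seen in [`Vm::alloc_page()`]:
///
/// ```ignore
/// let flags = VmAlloc::Interrupt | VmAlloc::System;
///
/// assert!(flags.has_any(VmAlloc::System));
/// assert_eq!(flags.get(VmAlloc::Count), 0);
/// ```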
#[bitflag(u32)]
pub enum VmAlloc {
    /// `VM_ALLOC_INTERRUPT`.
    Interrupt = 0x00000001,
    /// `VM_ALLOC_SYSTEM`.
    System = 0x00000002,
    /// `VM_ALLOC_IFCACHED`.
    Cached = 0x00000400,
    /// `VM_ALLOC_COUNT`.
    Count(u16) = 0xFFFF0000,
}

/// Represents an error when [`Vm::new()`] fails.
#[derive(Debug, Error)]
pub enum VmError {}