// obkrnl/vm/mod.rs

1pub use self::object::*;
2pub use self::page::*;
3
4use self::phys::PhysAllocator;
5use self::stats::VmStats;
6use crate::config::{PAGE_SHIFT, PAGE_SIZE};
7use crate::context::{config, current_thread};
8use crate::dmem::Dmem;
9use crate::lock::GutexGroup;
10use crate::proc::Proc;
11use alloc::sync::{Arc, Weak};
12use alloc::vec::Vec;
13use core::cmp::max;
14use core::fmt::Debug;
15use core::sync::atomic::{AtomicUsize, Ordering};
16use krt::info;
17use macros::bitflag;
18use thiserror::Error;
19
20mod object;
21mod page;
22mod phys;
23mod stats;
24
/// Implementation of Virtual Memory system.
///
/// All two-element arrays below are indexed by the "vm" pool index (0 or 1) that
/// [`Vm::new()`] assigns to each page based on its physical address relative to the
/// Dmem game region.
pub struct Vm {
    // Physical page allocator; owns the memory segments.
    phys: PhysAllocator,
    // One entry per physical page, in address order.
    pages: Vec<Arc<VmPage>>, // vm_page_array + vm_page_array_size
    // Per-pool counters guarded by gutexes; see VmStats.
    stats: [VmStats; 2],
    // Per-pool page-daemon processes. Currently never populated because
    // spawn_pagers() is a stub, so these stay dangling Weak pointers.
    pagers: [Weak<Proc>; 2], // pageproc
    // Per-pool count of pages callers failed to get; read by the page daemon.
    pages_deficit: [AtomicUsize; 2], // vm_pageout_deficit
}
33
34impl Vm {
    /// Constructs the VM and populates `vm_page_array` from the physical memory map.
    ///
    /// `phys_avail` holds physical address ranges as `[start, end)` pairs at even/odd
    /// indices; a pair whose `end` is zero terminates the list. `ma` is forwarded to
    /// [`PhysAllocator`]. `dmem` determines which pool (0 or 1) each page belongs to.
    ///
    /// Currently infallible in practice since [`VmError`] has no variants yet.
    ///
    /// See `vm_page_startup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x029200|
    pub fn new(
        phys_avail: [u64; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let phys = PhysAllocator::new(&phys_avail, ma);

        // Populate vm_page_array. We do a bit different than Orbis here to be able to make segind
        // immutable.
        let config = config();
        let blocked = config.env("vm.blacklist");
        // NOTE(review): addresses in [unk, game_end) go to pool 1 and are not freed;
        // presumably this is the FMEM-backed game region — confirm against Dmem.
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut pages = Vec::new();
        let mut free_pages = Vec::new();
        // Per-pool totals; only pool 0 pages are counted as free below.
        let mut page_count = [0; 2];
        let mut free_count = [0; 2];

        for i in (0..).step_by(2) {
            // Check if end entry.
            let addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            // Walk the range one page at a time (shadows the range start above).
            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
                // Check if blocked address. Note that setting vm.blacklist currently
                // aborts here for every page, since the actual check is not implemented.
                if blocked.is_some() {
                    // TODO: We probably want to use None for segment index here. The problem is
                    // Orbis use zero here.
                    let pi = pages.len();

                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));

                    todo!();
                }

                // Check if free page.
                let vm;
                let free = if addr < unk || addr >= dmem.game_end() {
                    // We inline a call to vm_phys_add_page() here.
                    vm = 0;

                    page_count[0] += 1;
                    free_count[0] += 1;

                    true
                } else {
                    // We inline a call to unknown function here.
                    vm = 1;

                    page_count[1] += 1;

                    false
                };

                // Add to list.
                let pi = pages.len();
                let seg = phys.segment_index(addr).unwrap();
                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));

                if free {
                    free_pages.push(page.clone());
                }

                pages.push(page);
            }
        }

        // v_free_count[1] is omitted: pool-1 pages are never marked free above.
        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

        // Initializes stats. The Orbis initialize these data in vm_pageout function but it is
        // possible for data race so we do it here instead.
        let pageout_page_count = 0x10; // TODO: Figure out where this value come from.
        let gg = GutexGroup::new();
        let stats = [
            VmStats {
                free_reserved: pageout_page_count + 100 + 10,
                cache_count: gg.clone().spawn_default(),
                free_count: gg.clone().spawn(free_count[0]),
                interrupt_free_min: gg.clone().spawn(2),
            },
            VmStats {
                free_reserved: pageout_page_count,
                cache_count: gg.clone().spawn_default(),
                // Always zero at this point; see the loop above.
                free_count: gg.clone().spawn(free_count[1]),
                interrupt_free_min: gg.clone().spawn(2),
            },
        ];

        // Add free pages. The Orbis do this on the above loop but that is not possible for us since
        // we use that loop to populate vm_page_array.
        let mut vm = Self {
            phys,
            pages,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        for page in free_pages {
            vm.free_page(&page, 0);
        }

        // Spawn page daemons. The Orbis do this in a separated sysinit but we do it here instead to
        // keep it in the VM subsystem.
        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }
160
    /// Allocates one physical page from pool `vm`, which is taken from `obj` (0 when
    /// `obj` is [`None`]).
    ///
    /// Returns [`None`] when free memory is at or below the pool's reserve and the
    /// request is not allowed to dip into it (the page daemon is woken and a deficit
    /// recorded instead), or when `VmAlloc::Cached` is requested without an object.
    ///
    /// Several paths are still `todo!()`, including the tail of the function, so no
    /// call can currently return a page.
    ///
    /// See `vm_page_alloc` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x02B030|
    pub fn alloc_page(&self, obj: Option<VmObject>, flags: VmAlloc) -> Option<VmPage> {
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let stats = &self.stats[vm];
        // Lock order: cache_count (read) before free_count (write). The write guard is
        // held across the reserve check so the count cannot change under us.
        let cache_count = stats.cache_count.read();
        let mut free_count = stats.free_count.write();
        let available = *free_count + *cache_count;

        if available <= stats.free_reserved {
            let p = td.proc();
            // The page daemon itself is always treated as a System request; everyone
            // else keeps only the Interrupt/System bits (shadows the parameter).
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                // System requests may dip into the reserve, but never below the
                // interrupt-only minimum.
                if available <= *stats.interrupt_free_min.read() {
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release both counter locks before waking the pager.
                    drop(free_count);
                    drop(cache_count);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                // Normal request with no reserve privileges.
                todo!()
            }
        }

        // Allocate VmPage.
        let page = match &obj {
            Some(_) => todo!(),
            None => {
                // IFCACHED is meaningless without an object to look the page up in.
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        // The Orbis assume page is never null here.
        let page = page.unwrap();
        let mut pf = page.flags().lock();

        match pf.has_any(PageFlags::Cached) {
            true => todo!(),
            false => *free_count -= 1,
        }

        match pf.has_any(PageFlags::Zero) {
            true => todo!(),
            false => *pf = PageFlags::zeroed(),
        }

        drop(pf);

        *page.access().lock() = PageAccess::zeroed();

        // Set oflags.
        let mut oflags = PageExtFlags::zeroed();

        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        // Pages are handed out busy unless the caller opted out.
        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        *page.extended_flags().lock() = oflags;

        if flags.has_any(VmAlloc::Wired) {
            todo!();
        }

        todo!()
    }
257
    /// Returns `page` (as a free block of size 2^`order` pages) to its segment's free
    /// queue, first coalescing it with equal-order buddy blocks up to order 12.
    ///
    /// `page` must not have active lock on any fields.
    ///
    /// See `vm_phys_free_pages` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x15FCB0|
    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
        // Get segment the page belong to.
        let mut page = page; // For scoped lifetime.
        let vm = page.vm();
        let mut pa = page.addr();
        let seg = if (page.unk1() & 1) == 0 {
            self.phys.segment(page.segment())
        } else {
            todo!()
        };

        // Lock the segment's free queues for the whole merge, then the page's own
        // order/pool fields. TODO: What is this? (comment inherited from Orbis study)
        let mut queues = seg.free_queues.lock();
        let mut po = page.order().lock();
        let mut pp = page.pool().lock();

        while order < 12 {
            let start = seg.start;
            // Buddy address: flip the single bit that distinguishes this 2^order-page
            // block from its neighbor of the same size.
            let buddy_pa = pa ^ (1u64 << (order + PAGE_SHIFT));

            if buddy_pa < start || buddy_pa >= seg.end {
                break;
            }

            // Get buddy page index.
            let i = (buddy_pa - start) >> PAGE_SHIFT;
            let buddy = &self.pages[seg.first_page + usize::try_from(i).unwrap()];
            let mut bo = buddy.order().lock();

            // Merge only if the buddy is a free block of the same order, same pool
            // index, and the same unk1 parity.
            if *bo != order || buddy.vm() != vm || ((page.unk1() ^ buddy.unk1()) & 1) != 0 {
                break;
            }

            // TODO: Check if we really need to preserve page order here. If not we need to replace
            // IndexMap with HashMap otherwise we need to find a better solution than IndexMap.
            let bp = buddy.pool().lock();

            // Pull the buddy out of its free queue; it is absorbed into the merged block.
            queues[vm][*bp][*bo].shift_remove(buddy);
            *bo = VmPage::FREE_ORDER;

            if *bp != *pp {
                todo!()
            }

            // Release the buddy's locks before moving to the merged (larger) block.
            drop(bp);
            drop(bo);

            order += 1;
            // Align down to the start of the merged 2^order block and switch `page`
            // (and its order/pool locks) to the page heading that block.
            pa &= !((1u64 << (order + PAGE_SHIFT)) - 1);
            page =
                &self.pages[seg.first_page + usize::try_from((pa - start) >> PAGE_SHIFT).unwrap()];
            po = page.order().lock();
            pp = page.pool().lock();
        }

        // Add to free queue.
        *po = order;
        queues[vm][*pp][order].insert(page.clone());
    }
325
    /// Spawns the per-pool page-daemon processes.
    ///
    /// Currently a stub: it does nothing, which leaves [`Vm::pagers`] as dangling
    /// `Weak` pointers.
    ///
    /// See `kick_pagedaemons` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0E40|
    fn spawn_pagers(&mut self) {
        // TODO: This requires v_page_count that populated by vm_page_startup. In order to populate
        // this we need phys_avail that populated by getmemsize.
    }
336
    /// Wakes the page daemon for the given pool index.
    ///
    /// Not implemented yet: any caller (currently [`Vm::alloc_page()`] on memory
    /// pressure) will panic here.
    ///
    /// See `pagedaemon_wakeup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0690|
    fn wake_pager(&self, _: usize) {
        todo!()
    }
346}
347
/// Implementation of `mem_affinity` structure.
///
/// Currently an empty placeholder; only its presence/absence is consumed (as an
/// `Option<&MemAffinity>`) by [`Vm::new()`] via `PhysAllocator::new`.
pub struct MemAffinity {}
350
/// Flags for [Vm::alloc_page()].
///
/// Bit values mirror the Orbis `VM_ALLOC_*` constants; `Count` occupies the high
/// 16 bits of the flag word.
#[bitflag(u32)]
pub enum VmAlloc {
    /// `VM_ALLOC_INTERRUPT`.
    Interrupt = 0x00000001,
    /// `VM_ALLOC_SYSTEM`.
    System = 0x00000002,
    /// `VM_ALLOC_WIRED`.
    Wired = 0x00000020,
    /// `VM_ALLOC_NOOBJ`.
    NoObj = 0x00000100,
    /// `VM_ALLOC_NOBUSY`.
    NoBusy = 0x00000200,
    /// `VM_ALLOC_IFCACHED`.
    Cached = 0x00000400,
    /// `VM_ALLOC_COUNT`.
    Count(u16) = 0xFFFF0000,
}
369
/// Represents an error when [`Vm::new()`] fails.
///
/// No variants exist yet, so construction is currently infallible; variants will be
/// added as more of `vm_page_startup` is implemented.
#[derive(Debug, Error)]
pub enum VmError {}