// obkrnl/vm/mod.rs

1pub use self::object::*;
2pub use self::page::*;
3
4use self::phys::PhysAllocator;
5use self::stats::VmStats;
6use crate::config::{PAGE_SHIFT, PAGE_SIZE};
7use crate::context::{config, current_thread};
8use crate::dmem::Dmem;
9use crate::lock::Mutex;
10use crate::proc::Proc;
11use alloc::sync::{Arc, Weak};
12use alloc::vec::Vec;
13use core::cmp::max;
14use core::fmt::Debug;
15use core::sync::atomic::{AtomicUsize, Ordering};
16use krt::info;
17use macros::bitflag;
18use thiserror::Error;
19
20mod object;
21mod page;
22mod phys;
23mod stats;
24
/// Implementation of Virtual Memory system.
pub struct Vm {
    /// Physical memory allocator (`vm_phys` on the Orbis).
    phys: PhysAllocator,
    /// One entry per physical page; indexed by position in the array (see `seg.first_page`).
    pages: Vec<Arc<VmPage>>, // vm_page_array + vm_page_array_size
    /// Per-pool page statistics. Index 0/1 selects one of the two page pools — presumably
    /// non-game vs. game/fmem memory (see how `Vm::new()` splits pages); confirm against Orbis.
    stats: [Mutex<VmStats>; 2],
    /// Page daemon process for each pool. Currently never populated (see `spawn_pagers`).
    pagers: [Weak<Proc>; 2],         // pageproc
    /// Pages requested but not satisfied while low on memory, per pool.
    pages_deficit: [AtomicUsize; 2], // vm_pageout_deficit
}
33
34impl Vm {
35    /// See `vm_page_startup` on the Orbis for a reference.
36    ///
37    /// # Reference offsets
38    /// | Version | Offset |
39    /// |---------|--------|
40    /// |PS4 11.00|0x029200|
41    pub fn new(
42        phys_avail: [u64; 61],
43        ma: Option<&MemAffinity>,
44        dmem: &Dmem,
45    ) -> Result<Arc<Self>, VmError> {
46        let phys = PhysAllocator::new(&phys_avail, ma);
47
48        // Populate vm_page_array. We do a bit different than Orbis here to be able to make segind
49        // immutable.
50        let config = config();
51        let blocked = config.env("vm.blacklist");
52        let unk = dmem.game_end() - dmem.config().fmem_max.get();
53        let mut pages = Vec::new();
54        let mut free_pages = Vec::new();
55        let mut page_count = [0; 2];
56        let mut free_count = [0; 2];
57
58        for i in (0..).step_by(2) {
59            // Check if end entry.
60            let addr = phys_avail[i];
61            let end = phys_avail[i + 1];
62
63            if end == 0 {
64                break;
65            }
66
67            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
68                // Check if blocked address.
69                if blocked.is_some() {
70                    // TODO: We probably want to use None for segment index here. The problem is
71                    // Orbis use zero here.
72                    let pi = pages.len();
73
74                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));
75
76                    todo!();
77                }
78
79                // Check if free page.
80                let vm;
81                let free = if addr < unk || addr >= dmem.game_end() {
82                    // We inline a call to vm_phys_add_page() here.
83                    vm = 0;
84
85                    page_count[0] += 1;
86                    free_count[0] += 1;
87
88                    true
89                } else {
90                    // We inline a call to unknown function here.
91                    vm = 1;
92
93                    page_count[1] += 1;
94
95                    false
96                };
97
98                // Add to list.
99                let pi = pages.len();
100                let seg = phys.segment_index(addr).unwrap();
101                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));
102
103                if free {
104                    free_pages.push(page.clone());
105                }
106
107                pages.push(page);
108            }
109        }
110
111        info!(
112            concat!(
113                "VM stats initialized.\n",
114                "v_page_count[0]: {}\n",
115                "v_free_count[0]: {}\n",
116                "v_page_count[1]: {}"
117            ),
118            page_count[0], free_count[0], page_count[1]
119        );
120
121        // Initializes stats. The Orbis initialize these data in vm_pageout function but it is
122        // possible for data race so we do it here instead.
123        let pageout_page_count = 0x10; // TODO: Figure out where this value come from.
124        let free_reserved = [pageout_page_count + 100 + 10, pageout_page_count];
125        let free_min = [free_reserved[0] + 325, free_reserved[1] + 64];
126        let stats = [
127            Mutex::new(VmStats {
128                free_reserved: free_reserved[0],
129                cache_min: if free_count[0] < 2049 {
130                    // TODO: Figure out where 2049 value come from.
131                    0
132                } else if free_count[0] < 6145 {
133                    // TODO: Figure out where 6145 value come from.
134                    free_reserved[0] + free_min[0] * 2
135                } else {
136                    free_reserved[0] + free_min[0] * 4
137                },
138                cache_count: 0,
139                free_count: free_count[0],
140                interrupt_free_min: 2,
141                wire_count: 0,
142            }),
143            Mutex::new(VmStats {
144                free_reserved: free_reserved[1],
145                cache_min: if free_count[1] < 2049 {
146                    // TODO: Figure out where 2049 value come from.
147                    0
148                } else if free_count[1] < 6145 {
149                    // TODO: Figure out where 6145 value come from.
150                    free_reserved[1] + free_min[1] * 2
151                } else {
152                    free_reserved[1] + free_min[1] * 4
153                },
154                cache_count: 0,
155                free_count: free_count[1],
156                interrupt_free_min: 2,
157                wire_count: 0,
158            }),
159        ];
160
161        // Add free pages. The Orbis do this on the above loop but that is not possible for us since
162        // we use that loop to populate vm_page_array.
163        let mut vm = Self {
164            phys,
165            pages,
166            stats,
167            pagers: Default::default(),
168            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
169        };
170
171        for page in free_pages {
172            vm.free_page(&page, 0);
173        }
174
175        // Spawn page daemons. The Orbis do this in a separated sysinit but we do it here instead to
176        // keep it in the VM subsystem.
177        vm.spawn_pagers();
178
179        Ok(Arc::new(vm))
180    }
181
    /// Allocates a single physical page, optionally destined for `obj` at index `pindex`.
    ///
    /// Returns [`None`] when memory is too low for this request class, or when
    /// [`VmAlloc::Cached`] is requested without an object (the object path is not implemented
    /// yet — several branches below are `todo!()`).
    ///
    /// See `vm_page_alloc` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x02B030|
    pub fn alloc_page(
        &self,
        obj: Option<VmObject>,
        pindex: usize,
        flags: VmAlloc,
    ) -> Option<Arc<VmPage>> {
        // Select the page pool: the object decides, no object means pool 0.
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let mut stats = self.stats[vm].lock();
        let available = stats.free_count + stats.cache_count;

        // Low-memory path. Reclassify the request first: the page daemon itself is always
        // treated as a system request; everyone else keeps only the Interrupt/System bits.
        if available <= stats.free_reserved {
            let p = td.proc();
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                if available <= stats.interrupt_free_min {
                    // NOTE(review): the shadowed `flags` had the Count bits masked off above, so
                    // this is always 1. Verify against the Orbis, which appears to use the
                    // caller's original request for VM_ALLOC_COUNT.
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release the stats lock before recording the deficit and waking the pager.
                    drop(stats);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

        // Allocate VmPage.
        let page = match &obj {
            Some(_) => todo!(),
            None => {
                // Cached-only requests cannot be satisfied without an object.
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        // The Orbis assume page is never null here.
        let page = page.unwrap();
        let mut ps = page.state.lock();

        // The page came either from the cache or the free list; adjust the matching counter.
        match ps.flags.has_any(PageFlags::Cached) {
            true => todo!(),
            false => stats.free_count -= 1,
        }

        match ps.flags.has_any(PageFlags::Zero) {
            true => todo!(),
            false => ps.flags = PageFlags::zeroed(),
        }

        ps.access = PageAccess::zeroed();

        // Set oflags.
        let mut oflags = PageExtFlags::zeroed();

        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        // Unless the caller asked for NoBusy or NoObj, the page starts out busy.
        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        ps.extended_flags = oflags;

        if flags.has_any(VmAlloc::Wired) {
            stats.wire_count += 1;
            ps.wire_count = 1;
        }

        ps.act_count = 0;

        match &obj {
            Some(_) => todo!(),
            None => ps.pindex = pindex,
        }

        // TODO: Call vdrop.
        if (stats.cache_count + stats.free_count) < (stats.cache_min + stats.free_reserved) {
            todo!()
        }

        // TODO: Set unknown field.
        drop(ps);

        Some(page)
    }
294
295    /// `page` must not have active lock on any fields.
296    ///
297    /// See `vm_phys_free_pages` on the Orbis for a reference.
298    ///
299    /// # Reference offsets
300    /// | Version | Offset |
301    /// |---------|--------|
302    /// |PS4 11.00|0x15FCB0|
303    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
304        // Get segment the page belong to.
305        let mut page = page; // For scoped lifetime.
306        let vm = page.vm;
307        let mut pa = page.addr;
308        let seg = if (page.unk1 & 1) == 0 {
309            self.phys.segment(page.segment)
310        } else {
311            todo!()
312        };
313
314        // TODO: What is this?
315        let mut queues = seg.free_queues.lock();
316        let mut ps = page.state.lock();
317
318        while order < 12 {
319            let start = seg.start;
320            let buddy_pa = pa ^ (1u64 << (order + PAGE_SHIFT)); // TODO: What is this?
321
322            if buddy_pa < start || buddy_pa >= seg.end {
323                break;
324            }
325
326            // Get buddy page index.
327            let i = (buddy_pa - start) >> PAGE_SHIFT;
328            let buddy = &self.pages[seg.first_page + usize::try_from(i).unwrap()];
329            let mut bs = buddy.state.lock();
330
331            if bs.order != order || buddy.vm != vm || ((page.unk1 ^ buddy.unk1) & 1) != 0 {
332                break;
333            }
334
335            // TODO: Check if we really need to preserve page order here. If not we need to replace
336            // IndexMap with HashMap otherwise we need to find a better solution than IndexMap.
337            queues[vm][bs.pool][bs.order].shift_remove(buddy);
338            bs.order = VmPage::FREE_ORDER;
339
340            if bs.pool != ps.pool {
341                todo!()
342            }
343
344            drop(bs);
345
346            order += 1;
347            pa &= !((1u64 << (order + PAGE_SHIFT)) - 1);
348            page =
349                &self.pages[seg.first_page + usize::try_from((pa - start) >> PAGE_SHIFT).unwrap()];
350            ps = page.state.lock();
351        }
352
353        // Add to free queue.
354        ps.order = order;
355        queues[vm][ps.pool][order].insert(page.clone());
356    }
357
    /// Spawns the page daemon processes, one per page pool.
    ///
    /// Currently a stub: nothing is spawned yet (see the TODO below), so `self.pagers` keeps its
    /// default dangling weak references.
    ///
    /// See `kick_pagedaemons` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0E40|
    fn spawn_pagers(&mut self) {
        // TODO: This requires v_page_count that populated by vm_page_startup. In order to populate
        // this we need phys_avail that populated by getmemsize.
    }
368
    /// Wakes the page daemon for the given pool so it can start reclaiming memory.
    ///
    /// Not implemented yet: reaching this panics via `todo!()`. Called from the low-memory path
    /// of [`Vm::alloc_page()`].
    ///
    /// See `pagedaemon_wakeup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0690|
    fn wake_pager(&self, _: usize) {
        todo!()
    }
378}
379
/// Implementation of `mem_affinity` structure.
///
/// Currently an empty placeholder; fields will be added as the memory-affinity logic is ported.
pub struct MemAffinity {}
382
/// Flags for [Vm::alloc_page()].
///
/// Mirrors the `VM_ALLOC_*` request flags on the Orbis.
#[bitflag(u32)]
pub enum VmAlloc {
    /// `VM_ALLOC_INTERRUPT`.
    Interrupt = 0x00000001,
    /// `VM_ALLOC_SYSTEM`.
    System = 0x00000002,
    /// `VM_ALLOC_WIRED`.
    Wired = 0x00000020,
    /// `VM_ALLOC_NOOBJ`.
    NoObj = 0x00000100,
    /// `VM_ALLOC_NOBUSY`.
    NoBusy = 0x00000200,
    /// `VM_ALLOC_IFCACHED`.
    Cached = 0x00000400,
    /// `VM_ALLOC_COUNT`. Carries a 16-bit value in the upper bits rather than a single flag.
    Count(u16) = 0xFFFF0000,
}
401
/// Represents an error when [`Vm::new()`] fails.
///
/// No variants yet; [`Vm::new()`] currently cannot fail, but the type keeps its signature stable
/// for when failure cases are added.
#[derive(Debug, Error)]
pub enum VmError {}