//! obkrnl/vm/mod.rs — Virtual Memory subsystem.

1pub use self::object::*;
2pub use self::page::*;
3
4use self::phys::PhysAllocator;
5use self::stats::VmStats;
6use crate::config::{PAGE_SHIFT, PAGE_SIZE};
7use crate::context::{config, current_thread};
8use crate::dmem::Dmem;
9use crate::lock::Mutex;
10use crate::proc::Proc;
11use alloc::sync::{Arc, Weak};
12use alloc::vec::Vec;
13use core::cmp::max;
14use core::fmt::Debug;
15use core::sync::atomic::{AtomicUsize, Ordering};
16use krt::info;
17use macros::bitflag;
18use thiserror::Error;
19
20#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
21#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
22mod arch;
23mod object;
24mod page;
25mod phys;
26mod stats;
27
/// Implementation of Virtual Memory system.
///
/// The two-element arrays are indexed by the "vm" pool index (0 or 1) that
/// [`Vm::new()`] assigns to each page based on its physical address relative
/// to the dmem game region.
pub struct Vm {
    phys: PhysAllocator,
    pages: Vec<Arc<VmPage>>, // vm_page_array + vm_page_array_size
    stats: [Mutex<VmStats>; 2],
    pagers: [Weak<Proc>; 2],         // pageproc
    pages_deficit: [AtomicUsize; 2], // vm_pageout_deficit
}
36
impl Vm {
    /// Bootstraps the VM system: populates `vm_page_array`, seeds the per-pool
    /// statistics, hands the initial free pages to the physical allocator and
    /// spawns the page daemons.
    ///
    /// See `vm_page_startup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x029200|
    pub fn new(
        phys_avail: [usize; 61],
        ma: Option<&MemAffinity>,
        dmem: &Dmem,
    ) -> Result<Arc<Self>, VmError> {
        let phys = PhysAllocator::new(&phys_avail, ma);

        // Populate vm_page_array. We do a bit different than Orbis here to be able to make segind
        // immutable.
        let config = config();
        let blocked = config.env("vm.blacklist");
        // Boundary address separating pool 1 (game memory) from pool 0.
        // NOTE(review): exact semantics are unclear; value is derived purely from dmem here.
        let unk = dmem.game_end() - dmem.config().fmem_max.get();
        let mut pages = Vec::new();
        let mut free_pages = Vec::new();
        let mut page_count = [0; 2]; // Per-pool total page count.
        let mut free_count = [0; 2]; // Per-pool free page count.

        // phys_avail is a list of (start, end) pairs terminated by an entry whose end is 0.
        // NOTE(review): the array length is odd (61); if the producer ever fails to include a
        // zero terminator, i == 60 would index phys_avail[61] and panic — confirm the invariant.
        for i in (0..).step_by(2) {
            // Check if end entry.
            let addr = phys_avail[i];
            let end = phys_avail[i + 1];

            if end == 0 {
                break;
            }

            // Walk the range one page at a time (addr shadows the range start here).
            for addr in (addr..end).step_by(PAGE_SIZE.get()) {
                // Check if blocked address.
                if blocked.is_some() {
                    // TODO: We probably want to use None for segment index here. The problem is
                    // Orbis use zero here.
                    let pi = pages.len();

                    pages.push(Arc::new(VmPage::new(pi, 0, 0, addr, 0)));

                    todo!();
                }

                // Check if free page. Pages outside the dmem game window go to pool 0 and start
                // free; pages inside it go to pool 1 and start non-free.
                let vm;
                let free = if addr < unk || addr >= dmem.game_end() {
                    // We inline a call to vm_phys_add_page() here.
                    vm = 0;

                    page_count[0] += 1;
                    free_count[0] += 1;

                    true
                } else {
                    // We inline a call to unknown function here.
                    vm = 1;

                    page_count[1] += 1;

                    false
                };

                // Add to list. The page's index in `pages` doubles as its identity (pi).
                let pi = pages.len();
                let seg = phys.segment_index(addr).unwrap();
                let page = Arc::new(VmPage::new(pi, vm, 0, addr, seg));

                if free {
                    free_pages.push(page.clone());
                }

                pages.push(page);
            }
        }

        info!(
            concat!(
                "VM stats initialized.\n",
                "v_page_count[0]: {}\n",
                "v_free_count[0]: {}\n",
                "v_page_count[1]: {}"
            ),
            page_count[0], free_count[0], page_count[1]
        );

        // Initializes stats. The Orbis initialize these data in vm_pageout function but it is
        // possible for data race so we do it here instead.
        let pageout_page_count = 0x10; // TODO: Figure out where this value come from.
        let free_reserved = [pageout_page_count + 100 + 10, pageout_page_count];
        let free_min = [free_reserved[0] + 325, free_reserved[1] + 64];
        let stats = [
            Mutex::new(VmStats {
                free_reserved: free_reserved[0],
                cache_min: if free_count[0] < 2049 {
                    // TODO: Figure out where 2049 value come from.
                    0
                } else if free_count[0] < 6145 {
                    // TODO: Figure out where 6145 value come from.
                    free_reserved[0] + free_min[0] * 2
                } else {
                    free_reserved[0] + free_min[0] * 4
                },
                cache_count: 0,
                free_count: free_count[0],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
            Mutex::new(VmStats {
                free_reserved: free_reserved[1],
                cache_min: if free_count[1] < 2049 {
                    // TODO: Figure out where 2049 value come from.
                    0
                } else if free_count[1] < 6145 {
                    // TODO: Figure out where 6145 value come from.
                    free_reserved[1] + free_min[1] * 2
                } else {
                    free_reserved[1] + free_min[1] * 4
                },
                cache_count: 0,
                free_count: free_count[1],
                interrupt_free_min: 2,
                wire_count: 0,
            }),
        ];

        // Add free pages. The Orbis do this on the above loop but that is not possible for us since
        // we use that loop to populate vm_page_array.
        let mut vm = Self {
            phys,
            pages,
            stats,
            pagers: Default::default(),
            pages_deficit: [AtomicUsize::new(0), AtomicUsize::new(0)],
        };

        for page in free_pages {
            vm.free_page(&page, 0);
        }

        // Spawn page daemons. The Orbis do this in a separated sysinit but we do it here instead to
        // keep it in the VM subsystem.
        vm.spawn_pagers();

        Ok(Arc::new(vm))
    }

    /// Allocates a single page, or returns [`None`] when the system is below its free reserve
    /// and the caller's allocation class does not permit dipping into it (in which case the
    /// pager for the page's pool is woken and a deficit is recorded).
    ///
    /// See `vm_page_alloc` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x02B030|
    pub fn alloc_page(
        &self,
        obj: Option<VmObject>,
        pindex: usize,
        flags: VmAlloc,
    ) -> Option<Arc<VmPage>> {
        // Pool selection: follow the object's pool if present, otherwise pool 0.
        let vm = obj.as_ref().map_or(0, |v| v.vm());
        let td = current_thread();
        let mut stats = self.stats[vm].lock();
        let available = stats.free_count + stats.cache_count;

        // Low-memory path: decide whether this caller may allocate from the reserve.
        if available <= stats.free_reserved {
            let p = td.proc();
            // Pager processes are always treated as VM_ALLOC_SYSTEM. Note that this `flags`
            // shadows the parameter only inside this low-memory policy check; the original
            // parameter is still used for Cached/Wired/NoBusy handling below.
            let mut flags = if Arc::as_ptr(p) == self.pagers[p.pager()].as_ptr() {
                VmAlloc::System.into()
            } else {
                flags & (VmAlloc::Interrupt | VmAlloc::System)
            };

            if (flags & (VmAlloc::Interrupt | VmAlloc::System)) == VmAlloc::Interrupt {
                flags = VmAlloc::Interrupt.into();
            }

            if flags == VmAlloc::Interrupt {
                todo!()
            } else if flags == VmAlloc::System {
                // Even SYSTEM allocations fail once the interrupt reserve would be consumed;
                // record the deficit and wake the pager instead.
                if available <= stats.interrupt_free_min {
                    let deficit = max(1, flags.get(VmAlloc::Count));

                    // Release the stats lock before waking the pager.
                    drop(stats);

                    self.pages_deficit[vm].fetch_add(deficit.into(), Ordering::Relaxed);
                    self.wake_pager(vm);

                    return None;
                }
            } else {
                todo!()
            }
        }

        // Allocate VmPage.
        let page = match &obj {
            Some(_) => todo!(),
            None => {
                // VM_ALLOC_IFCACHED makes no sense without an object to look the page up in.
                if flags.has_any(VmAlloc::Cached) {
                    return None;
                }

                self.phys
                    .alloc_page(&self.pages, vm, obj.is_none().into(), 0)
            }
        };

        // The Orbis assume page is never null here.
        let page = page.unwrap();
        let mut ps = page.state.lock();

        // Account the page against the right counter depending on where it came from.
        match ps.flags.has_any(PageFlags::Cached) {
            true => todo!(),
            false => stats.free_count -= 1,
        }

        match ps.flags.has_any(PageFlags::Zero) {
            true => todo!(),
            false => ps.flags = PageFlags::zeroed(),
        }

        ps.access = PageAccess::zeroed();

        // Set oflags.
        let mut oflags = PageExtFlags::zeroed();

        match &obj {
            Some(_) => todo!(),
            None => oflags |= PageExtFlags::Unmanaged,
        }

        // Pages are handed out busy unless the caller opted out.
        if !flags.has_any(VmAlloc::NoBusy | VmAlloc::NoObj) {
            oflags |= PageExtFlags::Busy;
        }

        ps.extended_flags = oflags;

        if flags.has_any(VmAlloc::Wired) {
            stats.wire_count += 1;
            ps.wire_count = 1;
        }

        ps.act_count = 0;

        match &obj {
            Some(_) => todo!(),
            None => ps.pindex = pindex,
        }

        // TODO: Call vdrop.
        if (stats.cache_count + stats.free_count) < (stats.cache_min + stats.free_reserved) {
            todo!()
        }

        // TODO: Set unknown field.
        drop(ps);

        Some(page)
    }

    /// Returns `page` to its segment's free queues, merging it with its free buddy blocks
    /// (classic buddy-allocator coalescing) up to order 12.
    ///
    /// Lock order here is segment free-queues first, then page state.
    ///
    /// `page` must not have active lock on any fields.
    ///
    /// See `vm_phys_free_pages` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x15FCB0|
    fn free_page(&self, page: &Arc<VmPage>, mut order: usize) {
        // Get segment the page belong to.
        let mut page = page; // For scoped lifetime.
        let vm = page.vm;
        let mut pa = page.addr;
        let seg = if (page.unk1 & 1) == 0 {
            self.phys.segment(page.segment)
        } else {
            todo!()
        };

        // TODO: What is this?
        let mut queues = seg.free_queues.lock();
        let mut ps = page.state.lock();

        while order < 12 {
            let start = seg.start;
            // The buddy of a block at `pa` differs only in the bit for this order's size.
            let buddy_pa = pa ^ (1usize << (order + PAGE_SHIFT)); // TODO: What is this?

            // Stop coalescing once the buddy falls outside this segment.
            if buddy_pa < start || buddy_pa >= seg.end {
                break;
            }

            // Get buddy page index.
            let buddy = &self.pages[seg.first_page + ((buddy_pa - start) >> PAGE_SHIFT)];
            let mut bs = buddy.state.lock();

            // The buddy must be a free block of the same order, same pool index and matching
            // unk1 parity to be mergeable.
            if bs.order != order || buddy.vm != vm || ((page.unk1 ^ buddy.unk1) & 1) != 0 {
                break;
            }

            // TODO: Check if we really need to preserve page order here. If not we need to replace
            // IndexMap with HashMap otherwise we need to find a better solution than IndexMap.
            queues[vm][bs.pool][bs.order].shift_remove(buddy);
            bs.order = VmPage::FREE_ORDER;

            if bs.pool != ps.pool {
                todo!()
            }

            drop(bs);

            // Merge: the combined block starts at the lower of the two buddies.
            order += 1;
            pa &= !((1usize << (order + PAGE_SHIFT)) - 1);
            // Re-point `page` (and its state guard) at the head page of the merged block.
            page = &self.pages[seg.first_page + ((pa - start) >> PAGE_SHIFT)];
            ps = page.state.lock();
        }

        // Add to free queue.
        ps.order = order;
        queues[vm][ps.pool][order].insert(page.clone());
    }

    /// Spawns the per-pool page daemons. Currently a stub.
    ///
    /// See `kick_pagedaemons` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0E40|
    fn spawn_pagers(&mut self) {
        // TODO: This requires v_page_count that populated by vm_page_startup. In order to populate
        // this we need phys_avail that populated by getmemsize.
    }

    /// Wakes the page daemon for the given pool index. Currently unimplemented.
    ///
    /// See `pagedaemon_wakeup` on the Orbis for a reference.
    ///
    /// # Reference offsets
    /// | Version | Offset |
    /// |---------|--------|
    /// |PS4 11.00|0x3E0690|
    fn wake_pager(&self, _: usize) {
        todo!()
    }
}
380
/// Implementation of `mem_affinity` structure.
///
/// Currently an empty placeholder; it is only passed through to
/// `PhysAllocator::new()`.
pub struct MemAffinity {}
383
/// Flags for [Vm::alloc_page()].
///
/// Values mirror the Orbis `VM_ALLOC_*` constants; the upper 16 bits carry the
/// `VM_ALLOC_COUNT` field (a page-count hint used for the pageout deficit).
#[bitflag(u32)]
pub enum VmAlloc {
    /// `VM_ALLOC_INTERRUPT`.
    Interrupt = 0x00000001,
    /// `VM_ALLOC_SYSTEM`.
    System = 0x00000002,
    /// `VM_ALLOC_WIRED`.
    Wired = 0x00000020,
    /// `VM_ALLOC_NOOBJ`.
    NoObj = 0x00000100,
    /// `VM_ALLOC_NOBUSY`.
    NoBusy = 0x00000200,
    /// `VM_ALLOC_IFCACHED`.
    Cached = 0x00000400,
    /// `VM_ALLOC_COUNT`.
    Count(u16) = 0xFFFF0000,
}
402
/// Represents an error when [`Vm::new()`] fails.
///
/// Currently has no variants because no failure path in [`Vm::new()`] is
/// implemented yet; variants will be added as `todo!()` paths are filled in.
#[derive(Debug, Error)]
pub enum VmError {}