// obkrnl/main.rs

1#![no_std]
2#![cfg_attr(not(test), no_main)]
3
4use self::config::{Config, Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE, Param1};
5use self::context::{ContextSetup, arch, config};
6use self::dmem::Dmem;
7use self::imgact::Ps4Abi;
8use self::malloc::KernelHeap;
9use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
10use self::sched::sleep;
11use self::uma::Uma;
12use self::vm::Vm;
13use ::config::{BootEnv, MapType};
14use alloc::string::String;
15use alloc::sync::Arc;
16use core::cmp::min;
17use core::fmt::Write;
18use core::mem::zeroed;
19use humansize::{DECIMAL, SizeFormatter};
20use krt::{boot_env, info, warn};
21
22#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
23#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
24mod arch;
25mod config;
26mod context;
27mod dmem;
28mod event;
29mod imgact;
30mod imgfmt;
31mod lock;
32mod malloc;
33mod proc;
34mod sched;
35mod signal;
36mod subsystem;
37mod trap;
38mod uma;
39mod vm;
40
41extern crate alloc;
42
/// This will be called by [`krt`] crate.
///
/// See Orbis kernel entry point for a reference.
///
/// `map` describes the loaded kernel image and `config` is the boot-time
/// configuration from the bootloader. Never returns: control is handed to
/// [`self::context::run_with_context()`], which runs [`setup()`] then [`run()`].
#[cfg_attr(target_os = "none", unsafe(no_mangle))]
fn main(map: &'static ::config::KernelMap, config: &'static ::config::Config) -> ! {
    // SAFETY: This function has a lot of restrictions. See Context documentation for more details.
    let config = Config::new(config);
    let params1 = Param1::new(&config);
    let cpu = self::arch::identify_cpu();
    // The only boot environment currently supported is a VM, so this match is
    // exhaustive; `hw` is the hypervisor name as raw bytes.
    let hw = match boot_env() {
        BootEnv::Vm(vm) => vm.hypervisor(),
    };

    info!(
        concat!(
            "Starting Obliteration Kernel on {}.\n",
            "cpu_vendor                 : {} × {}\n",
            "cpu_id                     : {:#x}\n",
            "boot_parameter.idps.product: {}\n",
            "physfree                   : {:#x}"
        ),
        String::from_utf8_lossy(hw),
        cpu.cpu_vendor,
        config.max_cpu(),
        cpu.cpu_id,
        config.idps().product,
        map.kern_vsize
    );

    // Setup the CPU after the first print to let the bootloader developer know (some of) their code
    // are working.
    let arch = unsafe { self::arch::setup_main_cpu(cpu) };

    // Setup proc0 to represent the kernel.
    let proc0 = Proc::new_bare(Arc::new(Proc0Abi));

    // Setup thread0 to represent this thread.
    let proc0 = Arc::new(proc0);
    let thread0 = Thread::new_bare(proc0);

    // Activate CPU context.
    let thread0 = Arc::new(thread0);

    // CPU 0 runs `setup` once, then enters `run`, which never returns.
    unsafe {
        self::context::run_with_context(
            config,
            arch,
            0,
            thread0,
            move |s| setup(s, map, params1),
            run,
        )
    };
}
97
/// First-stage kernel initialization, executed inside the context activated by
/// [`main()`].
///
/// Loads the physical memory map, initializes the DMEM system, populates the
/// `phys_avail`/`dump_avail` tables and runs the early part of the sysinit
/// vector. Returns the state that [`run()`] needs for the second stage.
fn setup(
    setup: &mut ContextSetup,
    map: &'static ::config::KernelMap,
    param1: Arc<Param1>,
) -> SetupResult {
    // Initialize physical memory.
    let mut mi = load_memory_map(u64::try_from(map.kern_vsize.get()).unwrap());
    let mut map = String::with_capacity(0x2000);

    // Renders `(start, end)` pairs from a physmap-style table into `buf`, one
    // region per line with a human-readable size. `last` is the index of the
    // final pair's start entry (inclusive).
    fn format_map(tab: &[u64], last: usize, buf: &mut String) {
        for i in (0..=last).step_by(2) {
            let start = tab[i];
            let end = tab[i + 1];
            let size = SizeFormatter::new(end - start, DECIMAL);

            write!(buf, "\n{start:#018x}-{end:#018x} ({size})").unwrap();
        }
    }

    format_map(&mi.physmap, mi.physmap_last, &mut map);

    info!(
        concat!(
            "Memory map loaded with {} maps.\n",
            "initial_memory_size: {} ({})\n",
            "basemem            : {:#x}\n",
            "boot_address       : {:#x}\n",
            "mptramp_pagetables : {:#x}\n",
            "Maxmem             : {:#x}",
            "{}"
        ),
        mi.physmap_last,
        mi.initial_memory_size,
        SizeFormatter::new(mi.initial_memory_size, DECIMAL),
        mi.boot_area,
        mi.boot_info.addr,
        mi.boot_info.page_tables,
        mi.end_page,
        map
    );

    // Reuse the buffer for the post-DMEM dump below.
    map.clear();

    // Initialize DMEM system. It takes `&mut mi`, so the map may change; we
    // print it again afterwards to show the difference.
    let dmem = Dmem::new(&mut mi);

    format_map(&mi.physmap, mi.physmap_last, &mut map);

    info!(
        concat!(
            "DMEM initialized.\n",
            "Mode  : {} ({})\n",
            "Maxmem: {:#x}",
            "{}"
        ),
        dmem.mode(),
        dmem.config().name,
        mi.end_page,
        map
    );

    drop(map);

    // TODO: We probably want to remove hard-coded start address of the first map here.
    let mut phys_avail = [0u64; 61];
    let mut pa_indx = 0;
    let mut dump_avail = [0u64; 61];
    let mut da_indx = 1;
    let mut physmem = 0; // Count of usable pages discovered below.
    let page_size = PAGE_SIZE.get().try_into().unwrap();
    let page_mask = u64::try_from(PAGE_MASK.get()).unwrap();
    let unk1 = 0xA494000 + 0x2200000; // TODO: What is this?
    let paddr_free = match mi.unk {
        0 => mi.paddr_free + 0x400000, // TODO: Why 0x400000?
        _ => mi.paddr_free,
    };

    mi.physmap[0] = page_size;

    // Seed both tables with the start of the first region. The page walk below
    // either extends the current (start, end) pair or opens a new one when it
    // finds a hole.
    phys_avail[pa_indx] = mi.physmap[0];
    pa_indx += 1;
    phys_avail[pa_indx] = mi.physmap[0];
    dump_avail[da_indx] = mi.physmap[0];

    // Walk every page of every physmap region and decide whether it is
    // available for general use.
    for i in (0..=mi.physmap_last).step_by(2) {
        let begin = mi.physmap[i].checked_next_multiple_of(page_size).unwrap();
        let end = min(mi.physmap[i + 1] & !page_mask, mi.end_page << PAGE_SHIFT);

        for pa in (begin..end).step_by(PAGE_SIZE.get()) {
            let mut full = false;

            // Exclude pages in [unk1 rounded down to 2 MiB, paddr_free) —
            // presumably early kernel allocations, TODO confirm — and pages
            // inside the dcons buffer (when one is configured).
            if (pa < (unk1 & 0xffffffffffe00000) || pa >= paddr_free)
                && (mi.dcons_addr == 0
                    || (pa < (mi.dcons_addr & 0xffffffffffffc000)
                        || (mi.dcons_addr + mi.dcons_size <= pa)))
            {
                if mi.memtest == 0 {
                    if pa == phys_avail[pa_indx] {
                        // Contiguous with the current entry: just extend it.
                        phys_avail[pa_indx] = pa + page_size;
                        physmem += 1;
                    } else {
                        let i = pa_indx + 1;

                        if i == 60 {
                            warn!("Too many holes in the physical address space, giving up.");
                            full = true;
                        } else {
                            // Hole found: open a new (start, end) pair.
                            pa_indx += 2;
                            phys_avail[i] = pa;
                            phys_avail[pa_indx] = pa + page_size;
                            physmem += 1;
                        }
                    }
                } else {
                    todo!()
                }
            }

            // `dump_avail` is maintained unconditionally (no exclusion above),
            // mirroring the extend-or-open-pair logic of `phys_avail`.
            if pa == dump_avail[da_indx] {
                dump_avail[da_indx] = pa + page_size;
            } else if (da_indx + 1) != 60 {
                dump_avail[da_indx + 1] = pa;
                dump_avail[da_indx + 2] = pa + page_size;
                da_indx += 2;
            }

            if full {
                break;
            }
        }
    }

    if mi.memtest != 0 {
        todo!()
    }

    // TODO: What is this?
    let msgbuf_size: u64 = param1
        .msgbuf_size()
        .next_multiple_of(PAGE_SIZE.get())
        .try_into()
        .unwrap();

    #[allow(clippy::while_immutable_condition)] // TODO: Remove this once implement below todo.
    while phys_avail[pa_indx] <= (phys_avail[pa_indx - 1] + page_size + msgbuf_size) {
        todo!()
    }

    // Reserve the message buffer at the top of the last available region.
    mi.end_page = phys_avail[pa_indx] >> PAGE_SHIFT;
    phys_avail[pa_indx] -= msgbuf_size;

    // TODO: Set msgbufp and validate DMEM addresses.
    // TODO: Why Orbis skip the first page?
    let mut pa = String::with_capacity(0x2000);
    let mut da = String::with_capacity(0x2000);

    format_map(&phys_avail, pa_indx - 1, &mut pa);
    format_map(&dump_avail, da_indx - 1, &mut da);

    info!(
        concat!(
            "Available physical memory populated.\n",
            "Maxmem    : {:#x}\n",
            "physmem   : {}\n",
            "phys_avail:",
            "{}\n",
            "dump_avail:",
            "{}"
        ),
        mi.end_page, physmem, pa, da
    );

    drop(da);
    drop(pa);

    // Run sysinit vector for subsystem. The Orbis use linker to put all sysinit functions in a list
    // then loop the list to execute all of it. We manually execute those functions instead for
    // readability. This also allow us to pass data from one function to another function. See
    // mi_startup function on the Orbis for a reference.
    let pmgr = ProcMgr::new();

    setup.set_uma(init_vm(phys_avail, &dmem)); // 161 on PS4 11.00.

    SetupResult { pmgr }
}
283
/// Second-stage kernel startup, executed after [`setup()`] returned.
///
/// Runs the rest of the sysinit vector and never returns; it ends in the
/// swapper loop.
fn run(sr: SetupResult) -> ! {
    // Activate stage 2 heap.
    info!("Activating stage 2 heap.");

    // SAFETY: presumably stage 2 must be activated at most once and this is the
    // only call site — confirm against KernelHeap's documentation.
    unsafe { KERNEL_HEAP.activate_stage2() };

    // Run remaining sysinit vector.
    create_init(&sr); // 659 on PS4 11.00.
    swapper(&sr); // 1119 on PS4 11.00.
}
294
/// See `getmemsize` on the Orbis for a reference.
///
/// # Reference offsets
/// | Version | Offset |
/// |---------|--------|
/// |PS4 11.00|0x25CF00|
///
/// Builds a sorted, coalesced physical memory map from the boot environment and
/// derives the values collected in [`MemoryInfo`]. `paddr_free` is the first
/// free physical address on entry; it is passed through [`load_pmap()`] before
/// being stored in the result.
fn load_memory_map(mut paddr_free: u64) -> MemoryInfo {
    // TODO: Some of the logic around here are very hard to understand.
    let mut physmap = [0u64; 60];
    let mut last = 0usize;
    let map = match boot_env() {
        BootEnv::Vm(v) => v.memory_map.as_slice(),
    };

    // Insert each RAM entry into physmap as a (start, end) pair, keeping the
    // table sorted and merging entries that touch.
    'top: for m in map {
        // We only interested in RAM.
        match m.ty {
            MapType::None => break,
            MapType::Ram => (),
            MapType::Reserved => continue,
        }

        // TODO: This should be possible only when booting from BIOS.
        if m.len == 0 {
            break;
        }

        // Check if we need to insert before the previous entries.
        let mut insert_idx = last + 2;
        let mut j = 0usize;

        while j <= last {
            if m.base < physmap[j + 1] {
                // Check if end address overlapped.
                if m.base + m.len > physmap[j] {
                    warn!("Overlapping memory regions, ignoring second region.");
                    continue 'top;
                }

                insert_idx = j;
                break;
            }

            j += 2;
        }

        // Check if end address is the start address of the next entry. If yes we just change
        // base address of it to increase its size.
        if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
            physmap[insert_idx] = m.base;
            continue;
        }

        // Check if start address is the end address of the previous entry. If yes we just
        // increase the size of previous entry.
        if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
            physmap[insert_idx - 1] = m.base + m.len;
            continue;
        }

        last += 2;

        if last == physmap.len() {
            warn!("Too many segments in the physical address map, giving up.");
            break;
        }

        // This loop does not make sense on the Orbis. It seems like if this loop once
        // entered it will never exit.
        #[allow(clippy::while_immutable_condition)]
        while insert_idx < last {
            todo!()
        }

        physmap[insert_idx] = m.base;
        physmap[insert_idx + 1] = m.base + m.len;
    }

    // Check if bootloader provide us a memory map. The Orbis will check if
    // preload_search_info() return null but we can't do that since we use a static size array
    // to pass this information.
    if physmap[1] == 0 {
        panic!("no memory map provided to the kernel");
    }

    // Get initial memory size and BIOS boot area.
    let page_size = PAGE_SIZE.get().try_into().unwrap();
    let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
    let mut initial_memory_size = 0;
    let mut boot_area = None;

    for i in (0..=last).step_by(2) {
        // Check if BIOS boot area.
        if physmap[i] == 0 {
            // TODO: Why 1024?
            boot_area = Some(physmap[i + 1] / 1024);
        }

        // Add to initial memory size. Only whole pages inside the region count.
        let start = physmap[i].next_multiple_of(page_size);
        let end = physmap[i + 1] & page_mask;

        initial_memory_size += end.saturating_sub(start);
    }

    // Check if we have boot area to start secondary CPU.
    let boot_area = match boot_area {
        Some(v) => v,
        None => panic!("no boot area provided to the kernel"),
    };

    // TODO: This seems like it is assume the first physmap always a boot area. The problem is
    // what is the point of the logic on the above to find boot_area?
    let boot_info = adjust_boot_area(physmap[1] / 1024);

    physmap[1] = boot_info.page_tables;

    // Get end page. The "hw.physmem" environment variable can only lower it.
    let mut end_page = physmap[last + 1] >> PAGE_SHIFT;
    let config = config();

    if let Some(v) = config.env("hw.physmem") {
        end_page = min(v.parse::<u64>().unwrap() >> PAGE_SHIFT, end_page);
    }

    // Get memtest flags. Defaults to 1 when "hw.memtest.tests" is not set.
    let memtest = config
        .env("hw.memtest.tests")
        .map(|v| v.parse().unwrap())
        .unwrap_or(1);

    // TODO: There is some unknown calls here.
    // Sum the sizes of all regions; bit 33 of the total (i.e. total >= 2^33)
    // feeds the flag below.
    let mut unk = 0;

    for i in (0..=last).rev().step_by(2) {
        unk = (unk + physmap[i + 1]) - physmap[i];
    }

    // TODO: Figure out the name of this variable.
    let mut unk = u32::from((unk >> 33) != 0);

    // TODO: We probably want to remove this CPU model checks but better to keep it for now so we
    // don't have a headache when the other places rely on the effect of this check.
    #[cfg(target_arch = "x86_64")]
    let cpu_ok = (arch().cpu.cpu_id & 0xffffff80) == 0x740f00;
    #[cfg(not(target_arch = "x86_64"))]
    let cpu_ok = true;

    if cpu_ok && !config.dipsw(Dipsw::Unk140) && !config.dipsw(Dipsw::Unk146) {
        unk |= 2;
    }

    paddr_free = load_pmap(paddr_free);

    // Get dcons buffer address. Both variables must be present; otherwise the
    // buffer is treated as absent (0, 0).
    let (dcons_addr, dcons_size) = match (config.env("dcons.addr"), config.env("dcons.size")) {
        (Some(addr), Some(size)) => (addr.parse().unwrap(), size.parse().unwrap()),
        _ => (0, 0),
    };

    // The call to initialize_dmem is moved to the caller of this function.
    MemoryInfo {
        physmap,
        physmap_last: last,
        boot_area,
        boot_info,
        dcons_addr,
        dcons_size,
        initial_memory_size,
        end_page,
        unk,
        paddr_free,
        memtest,
    }
}
470
471/// See `mp_bootaddress` on the Orbis for a reference.
472///
473/// # Reference offsets
474/// | Version | Offset |
475/// |---------|--------|
476/// |PS4 11.00|0x1B9D20|
477fn adjust_boot_area(original: u64) -> BootInfo {
478    // TODO: Most logic here does not make sense.
479    let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
480    let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
481    let need = u64::try_from(arch().secondary_start.len()).unwrap();
482    let addr = (original * 1024) & page_mask;
483
484    // TODO: What is this?
485    let addr = if need <= ((original * 1024) & 0xC00) {
486        addr
487    } else {
488        addr - page_size
489    };
490
491    BootInfo {
492        addr,
493        page_tables: addr - (page_size * 3),
494    }
495}
496
497/// See `pmap_bootstrap` on the Orbis for a reference.
498///
499/// # Reference offsets
500/// | Version | Offset |
501/// |---------|--------|
502/// |PS4 11.00|0x1127C0|
503fn load_pmap(paddr_free: u64) -> u64 {
504    let config = config();
505
506    if config.is_allow_disabling_aslr() && config.dipsw(Dipsw::DisabledKaslr) {
507        todo!()
508    } else {
509        // TODO: There are a lot of unknown variables here so we skip implementing this until we
510        // run into the code that using them.
511    }
512
513    paddr_free
514}
515
516/// See `vm_mem_init` function on the Orbis for a reference.
517///
518/// # Reference offsets
519/// | Version | Offset |
520/// |---------|--------|
521/// |PS4 11.00|0x39A390|
522fn init_vm(phys_avail: [u64; 61], dmem: &Dmem) -> Arc<Uma> {
523    // Initialize VM.
524    let vm = Vm::new(phys_avail, dmem).unwrap();
525
526    // Initialize UMA.
527    Uma::new(vm)
528}
529
530/// See `create_init` function on the Orbis for a reference.
531///
532/// # Reference offsets
533/// | Version | Offset |
534/// |---------|--------|
535/// |PS4 11.00|0x2BEF30|
536fn create_init(sr: &SetupResult) {
537    let abi = Arc::new(Ps4Abi);
538    let flags = Fork::CopyFd | Fork::CreateProcess;
539
540    sr.pmgr.fork(abi, flags).unwrap();
541
542    todo!()
543}
544
545/// See `scheduler` function on the Orbis for a reference.
546///
547/// # Reference offsets
548/// | Version | Offset |
549/// |---------|--------|
550/// |PS4 11.00|0x437E00|
551fn swapper(sr: &SetupResult) -> ! {
552    // TODO: Subscribe to "system_suspend_phase2_pre_sync" and "system_resume_phase2" event.
553    loop {
554        // TODO: Implement a call to vm_page_count_min().
555        let procs = sr.pmgr.list();
556
557        if procs.len() == 0 {
558            // TODO: The PS4 check for some value for non-zero but it seems like that value always
559            // zero.
560            sleep();
561            continue;
562        }
563
564        todo!();
565    }
566}
567
/// Implementation of [`ProcAbi`] for kernel process.
///
/// See `null_sysvec` on the PS4 for a reference.
struct Proc0Abi;

impl ProcAbi for Proc0Abi {
    /// See `null_fetch_syscall_args` on the PS4 for a reference.
    ///
    /// The kernel process never issues syscalls, so reaching this is a bug.
    fn syscall_handler(&self) {
        unimplemented!()
    }
}
579
/// Result of [`setup()`].
struct SetupResult {
    /// Process manager created during stage 1, consumed by `create_init`/`swapper`.
    pmgr: Arc<ProcMgr>,
}
584
/// Contains memory information populated from memory map.
///
/// Produced by [`load_memory_map()`] and further mutated by [`setup()`] and
/// `Dmem::new()`.
struct MemoryInfo {
    /// Sorted `(start, end)` physical address pairs.
    physmap: [u64; 60],
    /// Index of the start entry of the last pair in `physmap`.
    physmap_last: usize,
    /// BIOS boot area, stored as address / 1024 (see `load_memory_map`).
    boot_area: u64,
    /// Secondary CPU boot addresses from `adjust_boot_area`.
    boot_info: BootInfo,
    /// From the "dcons.addr" environment variable; 0 when absent.
    dcons_addr: u64,
    /// From the "dcons.size" environment variable; 0 when absent.
    dcons_size: u64,
    /// Total whole-page bytes across all physmap regions.
    initial_memory_size: u64,
    /// End of usable memory as a page number, possibly clamped by "hw.physmem".
    end_page: u64,
    unk: u32, // Seems like the only possible values are 0 - 3.
    /// First free physical address, after `load_pmap`.
    paddr_free: u64,
    /// Memtest flags from "hw.memtest.tests" (defaults to 1).
    memtest: u64,
}
599
/// Contains information for memory to boot a secondary CPU.
///
/// Produced by [`adjust_boot_area()`].
struct BootInfo {
    /// Page-aligned boot area address.
    addr: u64,
    /// Page table location, three pages below `addr`.
    page_tables: u64,
}
605
606// SAFETY: PRIMITIVE_HEAP is a mutable static so it valid for reads and writes. This will be safe as
607// long as no one access PRIMITIVE_HEAP.
608#[allow(dead_code)]
609#[cfg_attr(target_os = "none", global_allocator)]
610static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
611static mut PRIMITIVE_HEAP: [u8; 1024 * 1024] = unsafe { zeroed() };