1#![no_std]
2#![cfg_attr(not(test), no_main)]
3
4use self::config::{Config, Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE, Param1};
5use self::context::{ContextSetup, arch, config};
6use self::dmem::Dmem;
7use self::imgact::Ps4Abi;
8use self::malloc::KernelHeap;
9use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
10use self::sched::sleep;
11use self::uma::Uma;
12use self::vm::Vm;
13use ::config::{BootEnv, MapType};
14use alloc::string::String;
15use alloc::sync::Arc;
16use core::cmp::min;
17use core::fmt::Write;
18use core::mem::zeroed;
19use humansize::{DECIMAL, SizeFormatter};
20use krt::{boot_env, info, warn};
21
22#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
23#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
24mod arch;
25mod config;
26mod context;
27mod dmem;
28mod event;
29mod imgact;
30mod imgfmt;
31mod lock;
32mod malloc;
33mod proc;
34mod sched;
35mod signal;
36mod subsystem;
37mod trap;
38mod uma;
39mod vm;
40
41extern crate alloc;
42
/// Kernel entry point, invoked by the bootloader with the kernel map and the
/// raw boot-time configuration.
///
/// Identifies the CPU, logs boot information, creates the bare `proc0` /
/// `thread0` pair, then enters the kernel context via
/// [`self::context::run_with_context`], which never returns.
#[cfg_attr(target_os = "none", unsafe(no_mangle))]
fn main(map: &'static ::config::KernelMap, config: &'static ::config::Config) -> ! {
    // Wrap the raw boot config in the kernel's own Config type.
    let config = Config::new(config);
    let params1 = Param1::new(&config);
    let cpu = self::arch::identify_cpu();
    // Only the VM boot environment is currently supported.
    let hw = match boot_env() {
        BootEnv::Vm(vm) => vm.hypervisor(),
    };

    info!(
        concat!(
            "Starting Obliteration Kernel on {}.\n",
            "cpu_vendor : {} × {}\n",
            "cpu_id : {:#x}\n",
            "boot_parameter.idps.product: {}\n",
            "physfree : {:#x}"
        ),
        String::from_utf8_lossy(hw),
        cpu.cpu_vendor,
        config.max_cpu(),
        cpu.cpu_id,
        config.idps().product,
        map.kern_vsize
    );

    // SAFETY: We are on the boot CPU and this runs exactly once.
    let arch = unsafe { self::arch::setup_main_cpu(cpu) };

    // proc0 is the kernel process; it never executes userspace code.
    let proc0 = Proc::new_bare(Arc::new(Proc0Abi));

    let proc0 = Arc::new(proc0);
    let thread0 = Thread::new_bare(proc0);

    let thread0 = Arc::new(thread0);

    // Hand over to the context: setup() runs once for one-time init, then
    // run() takes over and never returns.
    unsafe {
        self::context::run_with_context(
            config,
            arch,
            0,
            thread0,
            move |s| setup(s, map, params1),
            run,
        )
    };
}
97
/// One-time kernel initialization, executed inside the initial context.
///
/// Loads the physical memory map, initializes DMEM, builds the `phys_avail`
/// and `dump_avail` tables, reserves the message buffer, then brings up the
/// VM + UMA subsystems. Returns the data [`run()`] needs.
fn setup(
    setup: &mut ContextSetup,
    map: &'static ::config::KernelMap,
    param1: Arc<Param1>,
) -> SetupResult {
    let mut mi = load_memory_map(u64::try_from(map.kern_vsize.get()).unwrap());
    // Scratch buffer for pretty-printing the memory maps below.
    let mut map = String::with_capacity(0x2000);

    // Appends each (start, end) pair in tab[..=last] to buf, one line per pair.
    fn format_map(tab: &[u64], last: usize, buf: &mut String) {
        for i in (0..=last).step_by(2) {
            let start = tab[i];
            let end = tab[i + 1];
            let size = SizeFormatter::new(end - start, DECIMAL);

            write!(buf, "\n{start:#018x}-{end:#018x} ({size})").unwrap();
        }
    }

    format_map(&mi.physmap, mi.physmap_last, &mut map);

    info!(
        concat!(
            "Memory map loaded with {} maps.\n",
            "initial_memory_size: {} ({})\n",
            "basemem : {:#x}\n",
            "boot_address : {:#x}\n",
            "mptramp_pagetables : {:#x}\n",
            "Maxmem : {:#x}",
            "{}"
        ),
        mi.physmap_last,
        mi.initial_memory_size,
        SizeFormatter::new(mi.initial_memory_size, DECIMAL),
        mi.boot_area,
        mi.boot_info.addr,
        mi.boot_info.page_tables,
        mi.end_page,
        map
    );

    map.clear();

    // DMEM initialization mutates mi (it takes &mut), so the map is dumped
    // again afterwards to show the post-DMEM layout.
    let dmem = Dmem::new(&mut mi);

    format_map(&mi.physmap, mi.physmap_last, &mut map);

    info!(
        concat!(
            "DMEM initialized.\n",
            "Mode : {} ({})\n",
            "Maxmem: {:#x}",
            "{}"
        ),
        dmem.mode(),
        dmem.config().name,
        mi.end_page,
        map
    );

    drop(map);

    // Build phys_avail/dump_avail: (start, end) pairs of page-aligned physical
    // ranges. 61 entries leave room for 30 pairs plus a zero terminator.
    let mut phys_avail = [0u64; 61];
    let mut pa_indx = 0;
    let mut dump_avail = [0u64; 61];
    let mut da_indx = 1;
    let mut physmem = 0;
    let page_size = PAGE_SIZE.get().try_into().unwrap();
    let page_mask = u64::try_from(PAGE_MASK.get()).unwrap();
    // NOTE(review): magic constants of unknown meaning — presumably bounds of a
    // reserved physical region; confirm against the original kernel.
    let unk1 = 0xA494000 + 0x2200000; let paddr_free = match mi.unk {
        0 => mi.paddr_free + 0x400000, _ => mi.paddr_free,
    };

    // Never hand out physical page 0.
    mi.physmap[0] = page_size;

    phys_avail[pa_indx] = mi.physmap[0];
    pa_indx += 1;
    phys_avail[pa_indx] = mi.physmap[0];
    dump_avail[da_indx] = mi.physmap[0];

    // Walk every page of every physmap range, extending the current pair or
    // opening a new one, and skipping reserved regions.
    for i in (0..=mi.physmap_last).step_by(2) {
        let begin = mi.physmap[i].checked_next_multiple_of(page_size).unwrap();
        let end = min(mi.physmap[i + 1] & !page_mask, mi.end_page << PAGE_SHIFT);

        for pa in (begin..end).step_by(PAGE_SIZE.get()) {
            let mut full = false;

            // Exclude the [unk1-rounded-down, paddr_free) region and the dcons
            // buffer (when configured) from phys_avail.
            if (pa < (unk1 & 0xffffffffffe00000) || pa >= paddr_free)
                && (mi.dcons_addr == 0
                    || (pa < (mi.dcons_addr & 0xffffffffffffc000)
                        || (mi.dcons_addr + mi.dcons_size <= pa)))
            {
                if mi.memtest == 0 {
                    // Contiguous with the open pair: just extend its end.
                    if pa == phys_avail[pa_indx] {
                        phys_avail[pa_indx] = pa + page_size;
                        physmem += 1;
                    } else {
                        let i = pa_indx + 1;

                        // Index 60 is the terminator slot; no room for a new pair.
                        if i == 60 {
                            warn!("Too many holes in the physical address space, giving up.");
                            full = true;
                        } else {
                            pa_indx += 2;
                            phys_avail[i] = pa;
                            phys_avail[pa_indx] = pa + page_size;
                            physmem += 1;
                        }
                    }
                } else {
                    // Memory testing is not implemented yet.
                    todo!()
                }
            }

            // dump_avail tracks every page, including ones excluded above.
            if pa == dump_avail[da_indx] {
                dump_avail[da_indx] = pa + page_size;
            } else if (da_indx + 1) != 60 {
                dump_avail[da_indx + 1] = pa;
                dump_avail[da_indx + 2] = pa + page_size;
                da_indx += 2;
            }

            if full {
                break;
            }
        }
    }

    if mi.memtest != 0 {
        todo!()
    }

    // Reserve space for the kernel message buffer at the end of the last range.
    let msgbuf_size: u64 = param1
        .msgbuf_size()
        .next_multiple_of(PAGE_SIZE.get())
        .try_into()
        .unwrap();

    // Falling back to an earlier range when the last one is too small is not
    // implemented yet.
    #[allow(clippy::while_immutable_condition)] while phys_avail[pa_indx] <= (phys_avail[pa_indx - 1] + page_size + msgbuf_size) {
        todo!()
    }

    mi.end_page = phys_avail[pa_indx] >> PAGE_SHIFT;
    phys_avail[pa_indx] -= msgbuf_size;

    // Log the final tables.
    let mut pa = String::with_capacity(0x2000);
    let mut da = String::with_capacity(0x2000);

    format_map(&phys_avail, pa_indx - 1, &mut pa);
    format_map(&dump_avail, da_indx - 1, &mut da);

    info!(
        concat!(
            "Available physical memory populated.\n",
            "Maxmem : {:#x}\n",
            "physmem : {}\n",
            "phys_avail:",
            "{}\n",
            "dump_avail:",
            "{}"
        ),
        mi.end_page, physmem, pa, da
    );

    drop(da);
    drop(pa);

    let pmgr = ProcMgr::new();

    // Install the UMA allocator into the context, then hand the results to run().
    setup.set_uma(init_vm(phys_avail, &dmem)); SetupResult { pmgr }
}
283
284fn run(sr: SetupResult) -> ! {
285 info!("Activating stage 2 heap.");
287
288 unsafe { KERNEL_HEAP.activate_stage2() };
289
290 create_init(&sr); swapper(&sr); }
294
/// Builds a [`MemoryInfo`] from the boot environment's memory map.
///
/// `paddr_free` is the first free physical address after the kernel image; it
/// is passed through [`load_pmap()`] before being stored in the result.
///
/// Panics if the map contains no RAM entries or no range starting at physical
/// address 0 (the boot area).
fn load_memory_map(mut paddr_free: u64) -> MemoryInfo {
    // (start, end) pairs; up to 30 ranges, kept sorted by base address.
    let mut physmap = [0u64; 60];
    let mut last = 0usize;
    let map = match boot_env() {
        BootEnv::Vm(v) => v.memory_map.as_slice(),
    };

    // Insert each RAM entry into physmap, coalescing with touching neighbors.
    'top: for m in map {
        match m.ty {
            MapType::None => break,
            MapType::Ram => (),
            MapType::Reserved => continue,
        }

        if m.len == 0 {
            break;
        }

        // Find the position that keeps physmap sorted by base address.
        let mut insert_idx = last + 2;
        let mut j = 0usize;

        while j <= last {
            if m.base < physmap[j + 1] {
                if m.base + m.len > physmap[j] {
                    warn!("Overlapping memory regions, ignoring second region.");
                    continue 'top;
                }

                insert_idx = j;
                break;
            }

            j += 2;
        }

        // New range ends exactly where an existing one starts: merge forward.
        if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
            physmap[insert_idx] = m.base;
            continue;
        }

        // New range starts exactly where an existing one ends: merge backward.
        if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
            physmap[insert_idx - 1] = m.base + m.len;
            continue;
        }

        last += 2;

        if last == physmap.len() {
            warn!("Too many segments in the physical address map, giving up.");
            break;
        }

        // Shifting later entries to make room for a mid-table insert is not
        // implemented yet.
        #[allow(clippy::while_immutable_condition)]
        while insert_idx < last {
            todo!()
        }

        physmap[insert_idx] = m.base;
        physmap[insert_idx + 1] = m.base + m.len;
    }

    if physmap[1] == 0 {
        panic!("no memory map provided to the kernel");
    }

    // Total up page-aligned memory and locate the boot area (the range that
    // starts at physical address 0).
    let page_size = PAGE_SIZE.get().try_into().unwrap();
    let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
    let mut initial_memory_size = 0;
    let mut boot_area = None;

    for i in (0..=last).step_by(2) {
        if physmap[i] == 0 {
            // Boot area end, in KiB.
            boot_area = Some(physmap[i + 1] / 1024);
        }

        let start = physmap[i].next_multiple_of(page_size);
        let end = physmap[i + 1] & page_mask;

        initial_memory_size += end.saturating_sub(start);
    }

    let boot_area = match boot_area {
        Some(v) => v,
        None => panic!("no boot area provided to the kernel"),
    };

    // Carve the secondary-CPU boot area out of the first range: its end is
    // pulled down to the start of the trampoline page tables.
    let boot_info = adjust_boot_area(physmap[1] / 1024);

    physmap[1] = boot_info.page_tables;

    // Highest usable page, optionally clamped by the hw.physmem variable.
    let mut end_page = physmap[last + 1] >> PAGE_SHIFT;
    let config = config();

    if let Some(v) = config.env("hw.physmem") {
        end_page = min(v.parse::<u64>().unwrap() >> PAGE_SHIFT, end_page);
    }

    // Memory test count defaults to 1 unless overridden by the environment.
    let memtest = config
        .env("hw.memtest.tests")
        .map(|v| v.parse().unwrap())
        .unwrap_or(1);

    // unk bit 0: set when total RAM reaches 2^33 bytes (8 GiB).
    let mut unk = 0;

    for i in (0..=last).rev().step_by(2) {
        unk = (unk + physmap[i + 1]) - physmap[i];
    }

    let mut unk = u32::from((unk >> 33) != 0);

    // unk bit 1: set for a specific CPU model (x86_64 only) when neither of the
    // two dipsw flags is active.
    #[cfg(target_arch = "x86_64")]
    let cpu_ok = (arch().cpu.cpu_id & 0xffffff80) == 0x740f00;
    #[cfg(not(target_arch = "x86_64"))]
    let cpu_ok = true;

    if cpu_ok && !config.dipsw(Dipsw::Unk140) && !config.dipsw(Dipsw::Unk146) {
        unk |= 2;
    }

    paddr_free = load_pmap(paddr_free);

    // dcons buffer location, only when both variables are present.
    let (dcons_addr, dcons_size) = match (config.env("dcons.addr"), config.env("dcons.size")) {
        (Some(addr), Some(size)) => (addr.parse().unwrap(), size.parse().unwrap()),
        _ => (0, 0),
    };

    MemoryInfo {
        physmap,
        physmap_last: last,
        boot_area,
        boot_info,
        dcons_addr,
        dcons_size,
        initial_memory_size,
        end_page,
        unk,
        paddr_free,
        memtest,
    }
}
470
471fn adjust_boot_area(original: u64) -> BootInfo {
478 let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
480 let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
481 let need = u64::try_from(arch().secondary_start.len()).unwrap();
482 let addr = (original * 1024) & page_mask;
483
484 let addr = if need <= ((original * 1024) & 0xC00) {
486 addr
487 } else {
488 addr - page_size
489 };
490
491 BootInfo {
492 addr,
493 page_tables: addr - (page_size * 3),
494 }
495}
496
497fn load_pmap(paddr_free: u64) -> u64 {
504 let config = config();
505
506 if config.is_allow_disabling_aslr() && config.dipsw(Dipsw::DisabledKaslr) {
507 todo!()
508 } else {
509 }
512
513 paddr_free
514}
515
516fn init_vm(phys_avail: [u64; 61], dmem: &Dmem) -> Arc<Uma> {
523 let vm = Vm::new(phys_avail, dmem).unwrap();
525
526 Uma::new(vm)
528}
529
530fn create_init(sr: &SetupResult) {
537 let abi = Arc::new(Ps4Abi);
538 let flags = Fork::CopyFd | Fork::CreateProcess;
539
540 sr.pmgr.fork(abi, flags).unwrap();
541
542 todo!()
543}
544
545fn swapper(sr: &SetupResult) -> ! {
552 loop {
554 let procs = sr.pmgr.list();
556
557 if procs.len() == 0 {
558 sleep();
561 continue;
562 }
563
564 todo!();
565 }
566}
567
/// [`ProcAbi`] implementation for proc0, the kernel process.
struct Proc0Abi;

impl ProcAbi for Proc0Abi {
    // proc0 never executes userspace code, so no syscall can ever arrive here.
    fn syscall_handler(&self) {
        unimplemented!()
    }
}
579
/// Data produced by [`setup()`] and consumed by [`run()`].
struct SetupResult {
    // Process manager created during setup; owner of all processes.
    pmgr: Arc<ProcMgr>,
}
584
585struct MemoryInfo {
587 physmap: [u64; 60],
588 physmap_last: usize,
589 boot_area: u64,
590 boot_info: BootInfo,
591 dcons_addr: u64,
592 dcons_size: u64,
593 initial_memory_size: u64,
594 end_page: u64,
595 unk: u32, paddr_free: u64,
597 memtest: u64,
598}
599
/// Physical placement of the secondary-CPU boot trampoline,
/// computed by [`adjust_boot_area()`].
struct BootInfo {
    // Page-aligned physical address of the secondary-CPU startup code.
    addr: u64,
    // Physical address of its page tables, three pages below `addr`.
    page_tables: u64,
}
605
// Global allocator. Starts out backed by the static PRIMITIVE_HEAP buffer;
// run() switches it to stage 2 via activate_stage2() once the VM/UMA
// subsystems are up.
#[allow(dead_code)]
#[cfg_attr(target_os = "none", global_allocator)]
static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
// 1 MiB buffer for early boot allocations; zeroed() is sound for a byte array.
static mut PRIMITIVE_HEAP: [u8; 1024 * 1024] = unsafe { zeroed() };