1#![no_std]
2#![cfg_attr(not(test), no_main)]
3
4use self::config::{Config, Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE, Param1};
5use self::context::{ContextSetup, arch, config};
6use self::dmem::Dmem;
7use self::imgact::Ps4Abi;
8use self::malloc::KernelHeap;
9use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
10use self::sched::sleep;
11use self::uma::Uma;
12use self::vm::Vm;
13use ::config::{BootEnv, MapType};
14use alloc::string::String;
15use alloc::sync::Arc;
16use core::cmp::min;
17use core::fmt::Write;
18use humansize::{DECIMAL, SizeFormatter};
19use krt::{boot_env, info, warn};
20
21#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
22#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
23mod arch;
24mod config;
25mod context;
26mod dmem;
27mod event;
28mod imgact;
29mod imgfmt;
30mod lock;
31mod malloc;
32mod proc;
33mod sched;
34mod signal;
35mod subsystem;
36mod trap;
37mod uma;
38mod vm;
39
40extern crate alloc;
41
/// Kernel entry point, called by the boot runtime (`krt`).
///
/// Identifies the CPU, wraps the boot-supplied configuration, creates
/// process 0 and thread 0, then enters the kernel context, which runs
/// [`setup`] followed by [`run`]. Never returns.
///
/// `no_mangle` is applied only for bare-metal builds so the boot code can
/// locate this symbol; test builds keep the normal Rust entry point.
#[cfg_attr(target_os = "none", unsafe(no_mangle))]
fn main(map: &'static ::config::KernelMap, config: &'static ::config::Config) -> ! {
    let config = Config::new(config);
    let params1 = Param1::new(&config);
    let cpu = self::arch::identify_cpu();
    // Only a VM boot environment exists right now; fetch the hypervisor name
    // for the banner below.
    let hw = match boot_env() {
        BootEnv::Vm(vm) => vm.hypervisor(),
    };

    info!(
        concat!(
            "Starting Obliteration Kernel on {}.\n",
            "cpu_vendor : {} × {}\n",
            "cpu_id : {:#x}\n",
            "boot_parameter.idps.product: {}\n",
            "physfree : {:#x}"
        ),
        String::from_utf8_lossy(hw),
        cpu.cpu_vendor,
        config.max_cpu(),
        cpu.cpu_id,
        config.idps().product,
        map.kern_vsize
    );

    // SAFETY note: arch-specific one-time CPU setup; presumably must run
    // before any context is entered — contract lives in the arch module.
    let arch = unsafe { self::arch::setup_main_cpu(cpu) };

    // Process 0 is the kernel process; it never issues syscalls (see Proc0Abi).
    let proc0 = Proc::new_bare(Arc::new(Proc0Abi));

    let proc0 = Arc::new(proc0);
    let thread0 = Thread::new_bare(proc0);

    let thread0 = Arc::new(thread0);

    // Enter the per-CPU context as CPU 0 / thread 0. `setup` produces a
    // SetupResult that is handed to `run`, which never returns.
    unsafe {
        self::context::run_with_context(
            config,
            arch,
            0,
            thread0,
            move |s| setup(s, map, params1),
            run,
        )
    };
}
96
97fn setup(
98 setup: &mut ContextSetup,
99 map: &'static ::config::KernelMap,
100 param1: Arc<Param1>,
101) -> SetupResult {
102 let mut mi = load_memory_map(u64::try_from(map.kern_vsize.get()).unwrap());
104 let mut map = String::with_capacity(0x2000);
105
106 fn format_map(tab: &[u64], last: usize, buf: &mut String) {
107 for i in (0..=last).step_by(2) {
108 let start = tab[i];
109 let end = tab[i + 1];
110 let size = SizeFormatter::new(end - start, DECIMAL);
111
112 write!(buf, "\n{start:#018x}-{end:#018x} ({size})").unwrap();
113 }
114 }
115
116 format_map(&mi.physmap, mi.physmap_last, &mut map);
117
118 info!(
119 concat!(
120 "Memory map loaded with {} maps.\n",
121 "initial_memory_size: {} ({})\n",
122 "basemem : {:#x}\n",
123 "boot_address : {:#x}\n",
124 "mptramp_pagetables : {:#x}\n",
125 "Maxmem : {:#x}",
126 "{}"
127 ),
128 mi.physmap_last,
129 mi.initial_memory_size,
130 SizeFormatter::new(mi.initial_memory_size, DECIMAL),
131 mi.boot_area,
132 mi.boot_info.addr,
133 mi.boot_info.page_tables,
134 mi.end_page,
135 map
136 );
137
138 map.clear();
139
140 let dmem = Dmem::new(&mut mi);
142
143 format_map(&mi.physmap, mi.physmap_last, &mut map);
144
145 info!(
146 concat!(
147 "DMEM initialized.\n",
148 "Mode : {} ({})\n",
149 "Maxmem: {:#x}",
150 "{}"
151 ),
152 dmem.mode(),
153 dmem.config().name,
154 mi.end_page,
155 map
156 );
157
158 drop(map);
159
160 let mut phys_avail = [0u64; 61];
162 let mut pa_indx = 0;
163 let mut dump_avail = [0u64; 61];
164 let mut da_indx = 1;
165 let mut physmem = 0;
166 let page_size = PAGE_SIZE.get().try_into().unwrap();
167 let page_mask = u64::try_from(PAGE_MASK.get()).unwrap();
168 let unk1 = 0xA494000 + 0x2200000; let paddr_free = match mi.unk {
170 0 => mi.paddr_free + 0x400000, _ => mi.paddr_free,
172 };
173
174 mi.physmap[0] = page_size;
175
176 phys_avail[pa_indx] = mi.physmap[0];
177 pa_indx += 1;
178 phys_avail[pa_indx] = mi.physmap[0];
179 dump_avail[da_indx] = mi.physmap[0];
180
181 for i in (0..=mi.physmap_last).step_by(2) {
182 let begin = mi.physmap[i].checked_next_multiple_of(page_size).unwrap();
183 let end = min(mi.physmap[i + 1] & !page_mask, mi.end_page << PAGE_SHIFT);
184
185 for pa in (begin..end).step_by(PAGE_SIZE.get()) {
186 let mut full = false;
187
188 if (pa < (unk1 & 0xffffffffffe00000) || pa >= paddr_free)
189 && (mi.dcons_addr == 0
190 || (pa < (mi.dcons_addr & 0xffffffffffffc000)
191 || (mi.dcons_addr + mi.dcons_size <= pa)))
192 {
193 if mi.memtest == 0 {
194 if pa == phys_avail[pa_indx] {
195 phys_avail[pa_indx] = pa + page_size;
196 physmem += 1;
197 } else {
198 let i = pa_indx + 1;
199
200 if i == 60 {
201 warn!("Too many holes in the physical address space, giving up.");
202 full = true;
203 } else {
204 pa_indx += 2;
205 phys_avail[i] = pa;
206 phys_avail[pa_indx] = pa + page_size;
207 physmem += 1;
208 }
209 }
210 } else {
211 todo!()
212 }
213 }
214
215 if pa == dump_avail[da_indx] {
216 dump_avail[da_indx] = pa + page_size;
217 } else if (da_indx + 1) != 60 {
218 dump_avail[da_indx + 1] = pa;
219 dump_avail[da_indx + 2] = pa + page_size;
220 da_indx += 2;
221 }
222
223 if full {
224 break;
225 }
226 }
227 }
228
229 if mi.memtest != 0 {
230 todo!()
231 }
232
233 let msgbuf_size: u64 = param1
235 .msgbuf_size()
236 .next_multiple_of(PAGE_SIZE.get())
237 .try_into()
238 .unwrap();
239
240 #[allow(clippy::while_immutable_condition)] while phys_avail[pa_indx] <= (phys_avail[pa_indx - 1] + page_size + msgbuf_size) {
242 todo!()
243 }
244
245 mi.end_page = phys_avail[pa_indx] >> PAGE_SHIFT;
246 phys_avail[pa_indx] -= msgbuf_size;
247
248 let mut pa = String::with_capacity(0x2000);
251 let mut da = String::with_capacity(0x2000);
252
253 format_map(&phys_avail, pa_indx - 1, &mut pa);
254 format_map(&dump_avail, da_indx - 1, &mut da);
255
256 info!(
257 concat!(
258 "Available physical memory populated.\n",
259 "Maxmem : {:#x}\n",
260 "physmem : {}\n",
261 "phys_avail:",
262 "{}\n",
263 "dump_avail:",
264 "{}"
265 ),
266 mi.end_page, physmem, pa, da
267 );
268
269 drop(da);
270 drop(pa);
271
272 let pmgr = ProcMgr::new();
277
278 setup.set_uma(init_vm(phys_avail, &dmem)); SetupResult { pmgr }
281}
282
283fn run(sr: SetupResult) -> ! {
284 info!("Activating stage 2 heap.");
286
287 unsafe { KERNEL_HEAP.activate_stage2() };
288
289 create_init(&sr); swapper(&sr); }
293
/// Builds a [`MemoryInfo`] from the boot environment's memory map.
///
/// RAM regions are collected into `physmap` as sorted, coalesced
/// (start, end) pairs, then a boot area for secondary CPUs is carved out and
/// values such as Maxmem (`end_page`), the memtest flag and the dcons buffer
/// location are derived from kernel environment variables.
///
/// `paddr_free` is seeded from the kernel's mapped size by the caller
/// (NOTE(review): passed `map.kern_vsize` — verify that is really the first
/// free physical address) and possibly adjusted by [`load_pmap`].
///
/// Panics when the map contains no RAM or no region starting at address 0.
fn load_memory_map(mut paddr_free: u64) -> MemoryInfo {
    // 30 (start, end) pairs maximum.
    let mut physmap = [0u64; 60];
    let mut last = 0usize;
    let map = match boot_env() {
        BootEnv::Vm(v) => v.memory_map.as_slice(),
    };

    'top: for m in map {
        // Only RAM regions are recorded; None terminates the map.
        match m.ty {
            MapType::None => break,
            MapType::Ram => (),
            MapType::Reserved => continue,
        }

        if m.len == 0 {
            break;
        }

        // Find the sorted insertion point; default is after the last pair.
        let mut insert_idx = last + 2;
        let mut j = 0usize;

        while j <= last {
            if m.base < physmap[j + 1] {
                if m.base + m.len > physmap[j] {
                    warn!("Overlapping memory regions, ignoring second region.");
                    continue 'top;
                }

                insert_idx = j;
                break;
            }

            j += 2;
        }

        // Coalesce with the following pair when this region ends exactly
        // where that pair starts.
        if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
            physmap[insert_idx] = m.base;
            continue;
        }

        // Coalesce with the preceding pair when this region starts exactly
        // where that pair ends.
        if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
            physmap[insert_idx - 1] = m.base + m.len;
            continue;
        }

        last += 2;

        if last == physmap.len() {
            warn!("Too many segments in the physical address map, giving up.");
            break;
        }

        // Shifting later pairs to make room mid-array is not implemented;
        // the allow silences clippy on the stub loop.
        #[allow(clippy::while_immutable_condition)]
        while insert_idx < last {
            todo!()
        }

        physmap[insert_idx] = m.base;
        physmap[insert_idx + 1] = m.base + m.len;
    }

    if physmap[1] == 0 {
        panic!("no memory map provided to the kernel");
    }

    let page_size = PAGE_SIZE.get().try_into().unwrap();
    let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
    let mut initial_memory_size = 0;
    let mut boot_area = None;

    // Sum page-aligned RAM and locate the region at physical address 0,
    // whose end (in KiB) becomes the boot area.
    for i in (0..=last).step_by(2) {
        if physmap[i] == 0 {
            boot_area = Some(physmap[i + 1] / 1024);
        }

        let start = physmap[i].next_multiple_of(page_size);
        let end = physmap[i + 1] & page_mask;

        initial_memory_size += end.saturating_sub(start);
    }

    let boot_area = match boot_area {
        Some(v) => v,
        None => panic!("no boot area provided to the kernel"),
    };

    // Carve the secondary-CPU boot area out of the first region.
    let boot_info = adjust_boot_area(physmap[1] / 1024);

    physmap[1] = boot_info.page_tables;

    // Maxmem in pages, optionally capped by the hw.physmem variable.
    let mut end_page = physmap[last + 1] >> PAGE_SHIFT;
    let config = config();

    if let Some(v) = config.env("hw.physmem") {
        end_page = min(v.parse::<u64>().unwrap() >> PAGE_SHIFT, end_page);
    }

    // Memory testing defaults to enabled (1) when the variable is absent.
    let memtest = config
        .env("hw.memtest.tests")
        .map(|v| v.parse().unwrap())
        .unwrap_or(1);

    // Total RAM size; bit 0 of the final `unk` flags > 8 GiB (bit 33 set).
    // NOTE(review): exact semantics of this value are unknown.
    let mut unk = 0;

    for i in (0..=last).rev().step_by(2) {
        unk = (unk + physmap[i + 1]) - physmap[i];
    }

    let mut unk = u32::from((unk >> 33) != 0);

    // On x86-64 only a specific CPU family/model range qualifies for bit 1.
    #[cfg(target_arch = "x86_64")]
    let cpu_ok = (arch().cpu.cpu_id & 0xffffff80) == 0x740f00;
    #[cfg(not(target_arch = "x86_64"))]
    let cpu_ok = true;

    if cpu_ok && !config.dipsw(Dipsw::Unk140) && !config.dipsw(Dipsw::Unk146) {
        unk |= 2;
    }

    paddr_free = load_pmap(paddr_free);

    // dcons buffer location; both variables must be present, else disabled.
    let (dcons_addr, dcons_size) = match (config.env("dcons.addr"), config.env("dcons.size")) {
        (Some(addr), Some(size)) => (addr.parse().unwrap(), size.parse().unwrap()),
        _ => (0, 0),
    };

    MemoryInfo {
        physmap,
        physmap_last: last,
        boot_area,
        boot_info,
        dcons_addr,
        dcons_size,
        initial_memory_size,
        end_page,
        unk,
        paddr_free,
        memtest,
    }
}
469
470fn adjust_boot_area(original: u64) -> BootInfo {
477 let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
479 let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
480 let need = u64::try_from(arch().secondary_start.len()).unwrap();
481 let addr = (original * 1024) & page_mask;
482
483 let addr = if need <= ((original * 1024) & 0xC00) {
485 addr
486 } else {
487 addr - page_size
488 };
489
490 BootInfo {
491 addr,
492 page_tables: addr - (page_size * 3),
493 }
494}
495
496fn load_pmap(paddr_free: u64) -> u64 {
503 let config = config();
504
505 if config.is_allow_disabling_aslr() && config.dipsw(Dipsw::DisabledKaslr) {
506 todo!()
507 } else {
508 }
511
512 paddr_free
513}
514
515fn init_vm(phys_avail: [u64; 61], dmem: &Dmem) -> Arc<Uma> {
522 let vm = Vm::new(phys_avail, None, dmem).unwrap();
524
525 Uma::new(vm)
527}
528
529fn create_init(sr: &SetupResult) {
536 let abi = Arc::new(Ps4Abi);
537 let flags = Fork::CopyFd | Fork::CreateProcess;
538
539 info!("Creating init process.");
540
541 sr.pmgr.fork(abi, flags).unwrap();
542
543 todo!()
544}
545
546fn swapper(sr: &SetupResult) -> ! {
553 loop {
555 let procs = sr.pmgr.list();
557
558 if procs.len() == 0 {
559 sleep();
562 continue;
563 }
564
565 todo!();
566 }
567}
568
/// [`ProcAbi`] implementation for process 0 (the kernel process).
struct Proc0Abi;
573
impl ProcAbi for Proc0Abi {
    /// Process 0 never issues syscalls, so reaching this handler is a bug.
    fn syscall_handler(&self) {
        unimplemented!()
    }
}
580
/// Values produced by [`setup`] and consumed by [`run`].
struct SetupResult {
    /// Global process manager created during setup.
    pmgr: Arc<ProcMgr>,
}
585
586struct MemoryInfo {
588 physmap: [u64; 60],
589 physmap_last: usize,
590 boot_area: u64,
591 boot_info: BootInfo,
592 dcons_addr: u64,
593 dcons_size: u64,
594 initial_memory_size: u64,
595 end_page: u64,
596 unk: u32, paddr_free: u64,
598 memtest: u64,
599}
600
/// Physical addresses reserved for booting the secondary CPUs.
struct BootInfo {
    /// Page-aligned address for the secondary-CPU startup code.
    addr: u64,
    /// Address of the page tables placed three pages below `addr`.
    page_tables: u64,
}
606
// Kernel heap backed by the static PRIMITIVE_HEAP buffer until
// activate_stage2() switches it to a real backing store; installed as the
// global allocator only on bare-metal (target_os = "none") builds.
#[allow(dead_code)]
#[cfg_attr(target_os = "none", global_allocator)]
static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
// 2 MiB stage-1 heap storage; only KERNEL_HEAP holds a pointer to it.
static mut PRIMITIVE_HEAP: [u8; 1024 * 1024 * 2] = [0; _];