1#![no_std]
2#![cfg_attr(not(test), no_main)]
3#![allow(clippy::needless_pub_self)] #![allow(clippy::type_complexity)] use self::config::{Config, Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE, Param1};
7use self::context::{ContextSetup, arch, config};
8use self::dmem::Dmem;
9use self::imgact::Ps4Abi;
10use self::malloc::KernelHeap;
11use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
12use self::sched::sleep;
13use self::uma::Uma;
14use self::vm::Vm;
15use ::config::{BootEnv, MapType};
16use alloc::string::String;
17use alloc::sync::Arc;
18use core::cmp::min;
19use core::fmt::Write;
20use humansize::{DECIMAL, SizeFormatter};
21use krt::{boot_env, info, warn};
22
23#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
24#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
25mod arch;
26mod config;
27mod context;
28mod dmem;
29mod event;
30mod imgact;
31mod imgfmt;
32mod lock;
33mod malloc;
34mod proc;
35mod sched;
36mod signal;
37mod subsystem;
38mod trap;
39mod uma;
40mod vm;
41
42extern crate alloc;
43
/// Kernel entry point, invoked by the bootloader on the bootstrap CPU.
///
/// Identifies the CPU, performs per-arch setup, creates proc0/thread0 and then
/// enters the kernel context, which runs [`setup`] followed by [`run`]. Never
/// returns.
#[cfg_attr(target_os = "none", unsafe(no_mangle))]
fn main(map: &'static ::config::KernelMap, config: &'static ::config::Config) -> ! {
    // Wrap the raw boot-time config in the kernel's own view of it.
    let config = Config::new(config);
    let params1 = Param1::new(&config);
    let cpu = self::arch::identify_cpu();
    // Only the VM boot environment exists at the moment; `hw` is the raw
    // hypervisor name bytes.
    let hw = match boot_env() {
        BootEnv::Vm(vm) => vm.hypervisor(),
    };

    info!(
        concat!(
            "Starting Obliteration Kernel on {}.\n",
            "cpu_vendor : {} × {}\n",
            "cpu_id : {:#x}\n",
            "boot_parameter.idps.product: {}\n",
            "physfree : {:#x}"
        ),
        String::from_utf8_lossy(hw),
        cpu.cpu_vendor,
        // NOTE(review): this fills the second placeholder on the cpu_vendor
        // line; it is the CPU count, not part of the vendor string.
        config.max_cpu(),
        cpu.cpu_id,
        config.idps().product,
        // NOTE(review): label says "physfree" but the value printed is
        // kern_vsize — confirm which is intended.
        map.kern_vsize
    );

    // Arch-specific initialization of the bootstrap CPU. Unsafe per
    // setup_main_cpu's contract — presumably it must run exactly once, before
    // any other arch usage; see the arch module for the exact requirements.
    let arch = unsafe { self::arch::setup_main_cpu(&config, cpu, map) };

    // proc0: the bare kernel process. Its ABI panics on any syscall since
    // proc0 never enters userspace.
    let proc0 = Proc::new_bare(Arc::new(Proc0Abi));

    let proc0 = Arc::new(proc0);
    // thread0: the first kernel thread, which becomes the current thread of
    // the context entered below.
    let thread0 = Thread::new_bare(proc0);

    let thread0 = Arc::new(thread0);

    // Enter the kernel context on CPU 0: setup() produces a SetupResult that
    // is handed to run(), which never returns. Unsafe per run_with_context's
    // contract.
    unsafe {
        self::context::run_with_context(
            config,
            arch,
            0,
            thread0,
            move |s| setup(s, map, params1),
            run,
        )
    };
}
98
/// First-stage setup, executed on thread0 inside the kernel context before
/// [`run`].
///
/// Loads the physical memory map, initializes DMEM, walks every page to build
/// the `phys_avail` / `dump_avail` range tables, reserves room for the kernel
/// message buffer, and brings up the VM + UMA subsystems. Returns the values
/// [`run`] needs.
fn setup(
    setup: &mut ContextSetup,
    map: &'static ::config::KernelMap,
    param1: Arc<Param1>,
) -> SetupResult {
    let mut mi = load_memory_map();
    let mut buf = String::with_capacity(0x2000);

    // Appends one "start-end (size)" line per [start, end) pair in `tab`;
    // `last` is the index of the final pair's start slot.
    fn format_map(tab: &[usize], last: usize, buf: &mut String) {
        for i in (0..=last).step_by(2) {
            let start = tab[i];
            let end = tab[i + 1];
            let size = SizeFormatter::new(end - start, DECIMAL);

            write!(buf, "\n{start:#018x}-{end:#018x} ({size})").unwrap();
        }
    }

    format_map(&mi.physmap, mi.physmap_last, &mut buf);

    info!(
        concat!(
            "Memory map loaded with {} maps.\n",
            "initial_memory_size: {} ({})\n",
            "basemem : {:#x}\n",
            "boot_address : {:#x}\n",
            "mptramp_pagetables : {:#x}\n",
            "Maxmem : {:#x}",
            "{}"
        ),
        // NOTE(review): physmap_last is the start index of the final pair,
        // not the number of maps — confirm the log label is intended.
        mi.physmap_last,
        mi.initial_memory_size,
        SizeFormatter::new(mi.initial_memory_size, DECIMAL),
        mi.boot_area,
        mi.boot_info.addr,
        mi.boot_info.page_tables,
        mi.end_page,
        buf
    );

    buf.clear();

    // DMEM setup takes `mi` mutably, so the physmap may change here; it is
    // re-logged below.
    let dmem = Dmem::new(&mut mi);

    format_map(&mi.physmap, mi.physmap_last, &mut buf);

    info!(
        concat!(
            "DMEM initialized.\n",
            "Mode : {} ({})\n",
            "Maxmem: {:#x}",
            "{}"
        ),
        dmem.mode(),
        dmem.config().name,
        mi.end_page,
        buf
    );

    drop(buf);

    // phys_avail / dump_avail hold [start, end) pairs like physmap. The 61st
    // slot stays zero as a terminator (the fill loops refuse index 60).
    // pa_indx / da_indx point at the "end" slot of the pair currently being
    // grown.
    let mut phys_avail = [0usize; 61];
    let mut pa_indx = 0;
    let mut dump_avail = [0usize; 61];
    let mut da_indx = 1;
    // Count of pages accepted into phys_avail.
    let mut physmem = 0;
    // NOTE(review): magic physical address; together with the 2 MiB mask
    // below it bounds a region that is excluded from phys_avail — meaning
    // unknown, confirm against the original kernel.
    let unk1 = 0xA494000 + 0x2200000;
    // First physical address past the kernel image that is free to hand out;
    // mi.unk == 0 adds a 4 MiB pad.
    let paddr_free = match mi.unk {
        0 => map.kern_vsize.get() + 0x400000,
        _ => map.kern_vsize.get(),
    };

    // Never hand out page 0: force the first region to start one page in.
    mi.physmap[0] = PAGE_SIZE.get();

    // Seed the first phys_avail / dump_avail pair as the empty range
    // [physmap[0], physmap[0]).
    phys_avail[pa_indx] = mi.physmap[0];
    pa_indx += 1;
    phys_avail[pa_indx] = mi.physmap[0];
    dump_avail[da_indx] = mi.physmap[0];

    // Walk every whole page of every physmap region.
    for i in (0..=mi.physmap_last).step_by(2) {
        // Align the region inward to page boundaries and clamp its end to
        // Maxmem.
        let begin = mi.physmap[i]
            .checked_next_multiple_of(PAGE_SIZE.get())
            .unwrap();
        let end = min(
            mi.physmap[i + 1] & !PAGE_MASK.get(),
            mi.end_page << PAGE_SHIFT,
        );

        for pa in (begin..end).step_by(PAGE_SIZE.get()) {
            let mut full = false;

            // Accept the page into phys_avail only if it is outside
            // [unk1 & ~0x1fffff, paddr_free) (reserved + kernel image) and
            // outside the dcons buffer, when one is configured.
            if (pa < (unk1 & 0xffffffffffe00000) || pa >= paddr_free)
                && (mi.dcons_addr == 0
                    || (pa < (mi.dcons_addr & 0xffffffffffffc000)
                        || (mi.dcons_addr + mi.dcons_size <= pa)))
            {
                if mi.memtest == 0 {
                    // Contiguous with the current pair: just extend its end.
                    if pa == phys_avail[pa_indx] {
                        phys_avail[pa_indx] = pa + PAGE_SIZE.get();
                        physmem += 1;
                    } else {
                        // Gap: open a new pair, unless doing so would use the
                        // terminator slot.
                        let i = pa_indx + 1;

                        if i == 60 {
                            warn!("Too many holes in the physical address space, giving up.");
                            full = true;
                        } else {
                            pa_indx += 2;
                            phys_avail[i] = pa;
                            phys_avail[pa_indx] = pa + PAGE_SIZE.get();
                            physmem += 1;
                        }
                    }
                } else {
                    // Per-page memory test path is not implemented.
                    todo!()
                }
            }

            // dump_avail records every page — including ones excluded from
            // phys_avail above — using the same extend-or-open-pair scheme.
            if pa == dump_avail[da_indx] {
                dump_avail[da_indx] = pa + PAGE_SIZE.get();
            } else if (da_indx + 1) != 60 {
                dump_avail[da_indx + 1] = pa;
                dump_avail[da_indx + 2] = pa + PAGE_SIZE.get();
                da_indx += 2;
            }

            // phys_avail ran out of slots; stop scanning this region.
            if full {
                break;
            }
        }
    }

    if mi.memtest != 0 {
        todo!()
    }

    // Reserve page-aligned space for the kernel message buffer at the top of
    // the last phys_avail range.
    let msgbuf_size = param1.msgbuf_size().next_multiple_of(PAGE_SIZE.get());

    // Fires when the last range is too small to hold the message buffer;
    // falling back to an earlier range is not implemented yet.
    #[allow(clippy::while_immutable_condition)]
    while phys_avail[pa_indx] <= (phys_avail[pa_indx - 1] + PAGE_SIZE.get() + msgbuf_size) {
        todo!()
    }

    // Maxmem now ends where the message buffer begins.
    mi.end_page = phys_avail[pa_indx] >> PAGE_SHIFT;
    phys_avail[pa_indx] -= msgbuf_size;

    let mut pa = String::with_capacity(0x2000);
    let mut da = String::with_capacity(0x2000);

    format_map(&phys_avail, pa_indx - 1, &mut pa);
    format_map(&dump_avail, da_indx - 1, &mut da);

    info!(
        concat!(
            "Available physical memory populated.\n",
            "Maxmem : {:#x}\n",
            "physmem : {}\n",
            "phys_avail:",
            "{}\n",
            "dump_avail:",
            "{}"
        ),
        mi.end_page,
        physmem,
        pa,
        da
    );

    drop(da);
    drop(pa);

    let pmgr = ProcMgr::new();

    // Bring up the VM + UMA from the ranges built above and register the UMA
    // with the context; run() receives the process manager.
    setup.set_uma(init_vm(phys_avail, &dmem));

    SetupResult { pmgr }
}
283
/// Second stage of kernel startup, entered by the context after [`setup`]
/// returns. Never returns.
fn run(sr: SetupResult) -> ! {
    info!("Activating stage 2 heap.");

    // Switch the kernel heap to its second stage. Unsafe per
    // activate_stage2's contract; presumably this relies on the VM/UMA that
    // setup() just created — confirm against the malloc module.
    unsafe { KERNEL_HEAP.activate_stage2() };

    // Spawn the init process, then become the swapper loop forever.
    create_init(&sr);
    swapper(&sr);
}
294
/// Builds a [`MemoryInfo`] from the boot-environment memory map.
///
/// RAM entries are inserted into `physmap` as [start, end) pairs kept in
/// ascending base order, coalescing adjacent entries and dropping overlapping
/// ones. Also derives the boot area, the initial memory size, Maxmem
/// (`end_page`) and a few tunables from the kernel environment.
///
/// # Panics
/// Panics when no region starting at physical address 0 is present.
fn load_memory_map() -> MemoryInfo {
    // [start, end) physical address pairs; `last` indexes the start slot of
    // the final pair in use.
    let mut physmap = [0usize; 60];
    let mut last = 0usize;
    let memory_map = match boot_env() {
        BootEnv::Vm(v) => v.memory_map.as_slice(),
    };

    'top: for m in memory_map {
        // None terminates the map; only Ram entries are inserted.
        match m.ty {
            MapType::None => break,
            MapType::Ram => (),
            MapType::Reserved => continue,
        }

        // A zero-length entry also terminates the map.
        if m.len == 0 {
            break;
        }

        // Find the insertion point that keeps physmap sorted by base;
        // default is appending after the last pair.
        let mut insert_idx = last + 2;
        let mut j = 0usize;

        while j <= last {
            if m.base < physmap[j + 1] {
                // The new region intersects pair j: drop it.
                if m.base + m.len > physmap[j] {
                    warn!("Overlapping memory regions, ignoring second region.");
                    continue 'top;
                }

                insert_idx = j;
                break;
            }

            j += 2;
        }

        // Coalesce with the following pair when the new region ends exactly
        // where that pair begins.
        if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
            physmap[insert_idx] = m.base;
            continue;
        }

        // Coalesce with the preceding pair when the new region begins exactly
        // where that pair ends. This is also how the very first region with
        // base 0 lands in pair 0 (physmap[1] starts out as 0).
        if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
            physmap[insert_idx - 1] = m.base + m.len;
            continue;
        }

        last += 2;

        if last == physmap.len() {
            warn!("Too many segments in the physical address map, giving up.");
            break;
        }

        // Shifting existing pairs up to make room for a middle insertion is
        // not implemented; this fires whenever insert_idx < last.
        #[allow(clippy::while_immutable_condition)]
        while insert_idx < last {
            todo!()
        }

        physmap[insert_idx] = m.base;
        physmap[insert_idx + 1] = m.base + m.len;
    }

    // physmap[1] is the end of the region that starts at address 0; it being
    // zero means no such region was ever merged in.
    if physmap[1] == 0 {
        panic!("no memory map provided to the kernel");
    }

    let mut initial_memory_size = 0;
    let mut boot_area = None;

    for i in (0..=last).step_by(2) {
        // The region starting at physical address 0 supplies the boot area,
        // recorded in KiB.
        if physmap[i] == 0 {
            boot_area = Some(physmap[i + 1] / 1024);
        }

        // Count only whole pages inside each region.
        let start = physmap[i].next_multiple_of(PAGE_SIZE.get());
        let end = physmap[i + 1] & !PAGE_MASK.get();

        initial_memory_size += end.saturating_sub(start);
    }

    let boot_area = match boot_area {
        Some(v) => v,
        None => panic!("no boot area provided to the kernel"),
    };

    // Pick the secondary-CPU startup area near the top of the low region,
    // then shrink that region so it ends at the trampoline page tables.
    let boot_info = adjust_boot_area(physmap[1] / 1024);

    physmap[1] = boot_info.page_tables;

    // Maxmem in pages, optionally clamped by the hw.physmem tunable.
    let mut end_page = physmap[last + 1] >> PAGE_SHIFT;
    let config = config();

    if let Some(v) = config.env("hw.physmem") {
        end_page = min(v.parse::<usize>().unwrap() >> PAGE_SHIFT, end_page);
    }

    // The memory test defaults to enabled unless hw.memtest.tests says
    // otherwise.
    let memtest = config
        .env("hw.memtest.tests")
        .map(|v| v.parse().unwrap())
        .unwrap_or(1);

    // Sum the size of every region; bit 33 set means the total is at least
    // 8 GiB.
    let mut unk = 0;

    for i in (0..=last).rev().step_by(2) {
        unk = (unk + physmap[i + 1]) - physmap[i];
    }

    // Bit 0 of `unk`: total RAM >= 8 GiB.
    let mut unk = u32::from((unk >> 33) != 0);

    // Bit 1 depends on the CPU model (x86-64 only) and two dipswitches whose
    // meaning is unknown.
    #[cfg(target_arch = "x86_64")]
    let cpu_ok = (arch().cpu.cpu_id & 0xffffff80) == 0x740f00;
    #[cfg(not(target_arch = "x86_64"))]
    let cpu_ok = true;

    if cpu_ok && !config.dipsw(Dipsw::Unk140) && !config.dipsw(Dipsw::Unk146) {
        unk |= 2;
    }

    // dcons buffer location, only when the environment provides both values.
    let (dcons_addr, dcons_size) = match (config.env("dcons.addr"), config.env("dcons.size")) {
        (Some(addr), Some(size)) => (addr.parse().unwrap(), size.parse().unwrap()),
        _ => (0, 0),
    };

    MemoryInfo {
        physmap,
        physmap_last: last,
        boot_area,
        boot_info,
        dcons_addr,
        dcons_size,
        initial_memory_size,
        end_page,
        unk,
        memtest,
    }
}
465
466fn adjust_boot_area(original: usize) -> BootInfo {
473 let need = arch().secondary_start.len();
475 let addr = (original * 1024) & !PAGE_MASK.get();
476
477 let addr = if need <= ((original * 1024) & 0xC00) {
479 addr
480 } else {
481 addr - PAGE_SIZE.get()
482 };
483
484 BootInfo {
485 addr,
486 page_tables: addr - (PAGE_SIZE.get() * 3),
487 }
488}
489
490fn init_vm(phys_avail: [usize; 61], dmem: &Dmem) -> Arc<Uma> {
497 let vm = Vm::new(phys_avail, None, dmem).unwrap();
499
500 Uma::new(vm)
502}
503
504fn create_init(sr: &SetupResult) {
511 let abi = Arc::new(Ps4Abi);
512 let flags = Fork::CopyFd | Fork::CreateProcess;
513
514 info!("Creating init process.");
515
516 sr.pmgr.fork(abi, flags).unwrap();
517
518 todo!()
519}
520
521fn swapper(sr: &SetupResult) -> ! {
528 loop {
530 let procs = sr.pmgr.list();
532
533 if procs.len() == 0 {
534 sleep();
537 continue;
538 }
539
540 todo!();
541 }
542}
543
/// [`ProcAbi`] for proc0, the kernel process created in [`main`].
struct Proc0Abi;

impl ProcAbi for Proc0Abi {
    /// Never expected to run: proc0 has no userspace code to issue syscalls,
    /// hence `unimplemented!`.
    fn syscall_handler(&self) {
        unimplemented!()
    }
}
555
/// Values produced by [`setup`] and consumed by [`run`].
struct SetupResult {
    // Process manager used by create_init() and swapper().
    pmgr: Arc<ProcMgr>,
}
560
/// Physical memory layout assembled by [`load_memory_map`] and refined by
/// [`setup`] / DMEM initialization.
struct MemoryInfo {
    // [start, end) physical address pairs, sorted ascending by start.
    physmap: [usize; 60],
    // Index of the start slot of the last pair in use.
    physmap_last: usize,
    // Size in KiB of the region starting at physical address 0.
    boot_area: usize,
    // Secondary-CPU startup addresses chosen by adjust_boot_area().
    boot_info: BootInfo,
    // dcons buffer location from the kernel environment; both zero when the
    // environment does not provide them.
    dcons_addr: usize,
    dcons_size: usize,
    // Byte total of whole pages across all RAM regions.
    initial_memory_size: usize,
    // Maxmem: one past the highest usable physical page number.
    end_page: usize,
    // Bit 0: total RAM >= 8 GiB. Bit 1: CPU-model/dipswitch condition
    // (meaning unknown — see load_memory_map()).
    unk: u32,
    // Non-zero selects the (unimplemented) memory test; defaults to 1,
    // overridable via hw.memtest.tests.
    memtest: u64,
}
574
/// Physical addresses for secondary-CPU startup, computed by
/// [`adjust_boot_area`].
struct BootInfo {
    // Page-aligned address that will hold the startup (trampoline) code.
    addr: usize,
    // Page tables for the startup code, three pages below `addr`.
    page_tables: usize,
}
580
// Stage-1 kernel heap backed by a static 32 MiB buffer; registered as the
// global allocator on bare metal. run() later switches it to stage 2 via
// activate_stage2().
#[allow(dead_code)]
#[cfg_attr(target_os = "none", global_allocator)]
static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
// Backing storage for the stage-1 heap; only ever accessed through
// KERNEL_HEAP.
static mut PRIMITIVE_HEAP: [u8; 1024 * 1024 * 32] = [0; _];
587
// The kernel assumes a 64-bit address space throughout (e.g. the physical
// address masks above), so refuse to build for anything else.
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Obliteration can only be used with 64-bit CPU");