1#![no_std]
2#![cfg_attr(not(test), no_main)]
3
4use self::config::{Config, Dipsw, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE};
5use self::context::{ContextSetup, arch, config, pmgr};
6use self::dmem::Dmem;
7use self::imgact::Ps4Abi;
8use self::malloc::KernelHeap;
9use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
10use self::sched::sleep;
11use self::uma::Uma;
12use self::vm::Vm;
13use ::config::{BootEnv, MapType};
14use alloc::string::String;
15use alloc::sync::Arc;
16use core::cmp::min;
17use core::mem::zeroed;
18use humansize::{DECIMAL, SizeFormatter};
19use krt::{boot_env, info, warn};
20
21#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
22#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
23mod arch;
24mod config;
25mod context;
26mod dmem;
27mod event;
28mod imgact;
29mod imgfmt;
30mod lock;
31mod malloc;
32mod proc;
33mod sched;
34mod signal;
35mod subsystem;
36mod trap;
37mod uma;
38mod vm;
39
40extern crate alloc;
41
42#[cfg_attr(target_os = "none", unsafe(no_mangle))]
46fn main(config: &'static ::config::Config) -> ! {
47 let config = Config::new(config);
49 let cpu = self::arch::identify_cpu();
50 let hw = match boot_env() {
51 BootEnv::Vm(vm) => vm.hypervisor(),
52 };
53
54 info!(
55 concat!(
56 "Starting Obliteration Kernel on {}.\n",
57 "cpu_vendor : {} × {}\n",
58 "cpu_id : {:#x}\n",
59 "boot_parameter.idps.product: {}"
60 ),
61 String::from_utf8_lossy(hw),
62 cpu.cpu_vendor,
63 config.max_cpu(),
64 cpu.cpu_id,
65 config.idps().product
66 );
67
68 let arch = unsafe { self::arch::setup_main_cpu(cpu) };
71
72 let proc0 = Proc::new_bare(Arc::new(Proc0Abi));
74
75 let proc0 = Arc::new(proc0);
77 let thread0 = Thread::new_bare(proc0);
78
79 let thread0 = Arc::new(thread0);
81
82 unsafe { self::context::run_with_context(config, arch, 0, thread0, setup, run) };
83}
84
85fn setup() -> ContextSetup {
86 let mut mi = load_memory_map();
88 let dmem = Dmem::new(&mut mi);
89
90 info!(
91 concat!("DMEM Mode : {}\n", "DMEM Config: {}"),
92 dmem.mode(),
93 dmem.config().name
94 );
95
96 let procs = ProcMgr::new();
101 let uma = init_vm(&mi); ContextSetup { uma, pmgr: procs }
104}
105
106fn run() -> ! {
107 info!("Activating stage 2 heap.");
109
110 unsafe { KERNEL_HEAP.activate_stage2() };
111
112 create_init(); swapper(); }
116
/// Builds the kernel's view of physical memory from the boot environment's
/// memory map.
///
/// `physmap` holds up to 30 disjoint RAM segments as `[start, end)` pairs at
/// even/odd index pairs, kept sorted by base address. The function merges
/// adjacent segments, measures usable memory, reserves the boot area, and
/// returns everything as a [`MemoryInfo`].
///
/// # Panics
/// Panics when the map contains no RAM (`physmap[1] == 0`) or no segment
/// starting at physical address zero (no boot area).
fn load_memory_map() -> MemoryInfo {
    // 30 (start, end) pairs; all zeroes means "empty".
    let mut physmap = [0u64; 60];
    // Index of the start slot of the last populated pair.
    let mut last = 0usize;
    let map = match boot_env() {
        BootEnv::Vm(v) => v.memory_map.as_slice(),
    };

    'top: for m in map {
        match m.ty {
            // None terminates the map; Reserved entries are skipped entirely.
            MapType::None => break,
            MapType::Ram => (),
            MapType::Reserved => continue,
        }

        // A zero-length entry also terminates the map.
        if m.len == 0 {
            break;
        }

        // Find where this segment belongs in the sorted list. Default is
        // "append after the current last pair".
        let mut insert_idx = last + 2;
        let mut j = 0usize;

        while j <= last {
            // First existing segment whose end lies beyond our base.
            if m.base < physmap[j + 1] {
                // Overlap with an existing segment: drop the new one.
                if m.base + m.len > physmap[j] {
                    warn!("Overlapping memory regions, ignoring second region.");
                    continue 'top;
                }

                insert_idx = j;
                break;
            }

            j += 2;
        }

        // New segment ends exactly where an existing one starts: extend that
        // segment downward instead of inserting.
        if insert_idx <= last && m.base + m.len == physmap[insert_idx] {
            physmap[insert_idx] = m.base;
            continue;
        }

        // New segment starts exactly where the previous one ends: extend that
        // segment upward instead of inserting.
        if insert_idx > 0 && m.base == physmap[insert_idx - 1] {
            physmap[insert_idx - 1] = m.base + m.len;
            continue;
        }

        last += 2;

        if last == physmap.len() {
            warn!("Too many segments in the physical address map, giving up.");
            break;
        }

        // Shifting later pairs to make room for a middle insertion is not
        // implemented yet; only append-at-end has been exercised so far.
        #[allow(clippy::while_immutable_condition)]
        while insert_idx < last {
            todo!()
        }

        physmap[insert_idx] = m.base;
        physmap[insert_idx + 1] = m.base + m.len;
    }

    if physmap[1] == 0 {
        panic!("no memory map provided to the kernel");
    }

    let page_size = PAGE_SIZE.get().try_into().unwrap();
    // PAGE_MASK holds the low bits; invert to get an address-aligning mask.
    let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
    let mut initial_memory_size = 0;
    let mut boot_area = None;

    for i in (0..=last).step_by(2) {
        // The segment starting at physical 0 supplies the boot area; its end
        // is recorded in KiB.
        if physmap[i] == 0 {
            boot_area = Some(physmap[i + 1] / 1024);
        }

        // Count only whole pages: round start up, truncate end down.
        let start = physmap[i].next_multiple_of(page_size);
        let end = physmap[i + 1] & page_mask;

        initial_memory_size += end.saturating_sub(start);
    }

    let boot_area = match boot_area {
        Some(v) => v,
        None => panic!("no boot area provided to the kernel"),
    };

    // Carve the AP trampoline + page tables out of the end of the first
    // segment, then shrink that segment so they are not treated as free RAM.
    let boot_info = adjust_boot_area(physmap[1] / 1024);

    physmap[1] = boot_info.page_tables;

    // Highest page number, optionally clamped by the hw.physmem kernel
    // environment variable (a byte count).
    let mut end_page = physmap[last + 1] >> PAGE_SHIFT;
    let config = config();

    if let Some(v) = config.env("hw.physmem") {
        end_page = min(v.parse::<u64>().unwrap() >> PAGE_SHIFT, end_page);
    }

    let mut unk = 0;

    // Total RAM across all segments (summed back-to-front).
    for i in (0..=last).rev().step_by(2) {
        unk = (unk + physmap[i + 1]) - physmap[i];
    }

    // Bit 0: total RAM exceeds 8 GiB. Exact meaning of this flag word is
    // unknown (mirrors the original firmware's behavior).
    let mut unk = u32::from((unk >> 33) != 0);

    #[cfg(target_arch = "x86_64")]
    let cpu_ok = (arch().cpu.cpu_id & 0xffffff80) == 0x740f00;
    #[cfg(not(target_arch = "x86_64"))]
    let cpu_ok = true;

    // Bit 1: set on the matching CPU model unless either DIP switch disables
    // it; the switches' semantics are unknown (hence the Unk names).
    if cpu_ok && !config.dipsw(Dipsw::Unk140) && !config.dipsw(Dipsw::Unk146) {
        unk |= 2;
    }

    load_pmap();

    MemoryInfo {
        physmap,
        physmap_last: last,
        boot_area,
        boot_info,
        initial_memory_size,
        end_page,
        unk,
    }
}
276
277fn adjust_boot_area(original: u64) -> BootInfo {
284 let page_size = u64::try_from(PAGE_SIZE.get()).unwrap();
286 let page_mask = !u64::try_from(PAGE_MASK.get()).unwrap();
287 let need = u64::try_from(arch().secondary_start.len()).unwrap();
288 let addr = (original * 1024) & page_mask;
289
290 let addr = if need <= ((original * 1024) & 0xC00) {
292 addr
293 } else {
294 addr - page_size
295 };
296
297 BootInfo {
298 addr,
299 page_tables: addr - (page_size * 3),
300 }
301}
302
303fn load_pmap() {
310 let config = config();
311
312 if config.is_allow_disabling_aslr() && config.dipsw(Dipsw::DisabledKaslr) {
313 todo!()
314 } else {
315 }
318}
319
320fn init_vm(mi: &MemoryInfo) -> Arc<Uma> {
327 let vm = unsafe { Vm::new(mi).unwrap() };
329
330 info!(
331 concat!(
332 "initial_memory_size: {} ({})\n",
333 "basemem : {:#x}\n",
334 "boot_address : {:#x}\n",
335 "mptramp_pagetables : {:#x}\n",
336 "Maxmem : {:#x}"
337 ),
338 vm.initial_memory_size(),
339 SizeFormatter::new(vm.initial_memory_size(), DECIMAL),
340 vm.boot_area(),
341 vm.boot_addr(),
342 vm.boot_tables(),
343 vm.end_page()
344 );
345
346 Uma::new(vm)
348}
349
350fn create_init() {
357 let pmgr = pmgr().unwrap();
358 let abi = Arc::new(Ps4Abi);
359 let flags = Fork::CopyFd | Fork::CreateProcess;
360
361 pmgr.fork(abi, flags).unwrap();
362
363 todo!()
364}
365
366fn swapper() -> ! {
373 let procs = pmgr().unwrap();
375
376 loop {
377 let procs = procs.list();
379
380 if procs.len() == 0 {
381 sleep();
384 continue;
385 }
386
387 todo!();
388 }
389}
390
/// Process ABI used for proc0, the kernel's own process.
struct Proc0Abi;

impl ProcAbi for Proc0Abi {
    /// proc0 never executes userspace code, so no syscall should ever reach
    /// this handler; hitting it is a bug.
    fn syscall_handler(&self) {
        unimplemented!()
    }
}
402
/// Physical-memory layout produced by `load_memory_map` and consumed by the
/// VM subsystem.
struct MemoryInfo {
    // RAM segments as [start, end) pairs at even/odd index pairs, sorted by
    // base address; unused slots are zero.
    physmap: [u64; 60],
    // Index of the start slot of the last populated pair in `physmap`.
    physmap_last: usize,
    // End of the segment starting at physical 0, in KiB.
    boot_area: u64,
    // Addresses reserved for secondary-CPU startup code and its page tables.
    boot_info: BootInfo,
    // Bytes of whole-page RAM across all segments.
    initial_memory_size: u64,
    // Highest usable page number (possibly clamped by hw.physmem).
    end_page: u64,
    // Flag word of unknown meaning (bit 0: >8 GiB RAM; bit 1: CPU/DIP-switch
    // dependent) — mirrors original firmware behavior.
    unk: u32,
}
413
/// Addresses carved out of the boot area by `adjust_boot_area`.
struct BootInfo {
    // Page-aligned address for the secondary-CPU startup code.
    addr: u64,
    // Address of the three page-table pages directly below `addr`.
    page_tables: u64,
}
419
// The kernel's global allocator, initially backed by the static primitive
// heap below until `activate_stage2` switches it over.
#[allow(dead_code)]
#[cfg_attr(target_os = "none", global_allocator)]
static KERNEL_HEAP: KernelHeap = unsafe { KernelHeap::new(&raw mut PRIMITIVE_HEAP) };
// SAFETY: an all-zero bit pattern is a valid value for [u8; N], so zeroed()
// is sound here. 1 MiB stage-1 heap used before the real VM comes up.
static mut PRIMITIVE_HEAP: [u8; 1024 * 1024] = unsafe { zeroed() };