//! obkrnl/context/mod.rs
1pub use self::arc::*;
2pub use self::arch::*;
3pub use self::local::*;
4
5use crate::arch::ArchConfig;
6use crate::config::Config;
7use crate::proc::{ProcMgr, Thread};
8use crate::uma::Uma;
9use alloc::rc::Rc;
10use alloc::sync::Arc;
11use core::marker::PhantomData;
12use core::mem::offset_of;
13use core::pin::pin;
14use core::ptr::null;
15use core::sync::atomic::Ordering;
16
17mod arc;
18#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
19#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
20mod arch;
21mod local;
22
/// See `pcpu_init` on the Orbis for a reference.
///
/// # Safety
/// - This function can be called only once per CPU.
/// - `config` must be the same object for all context.
/// - `arch` must be the same object for all context.
/// - `cpu` must be unique and valid.
/// - `setup` must return the same objects for all context.
///
/// # Reference offsets
/// | Version | Offset |
/// |---------|--------|
/// |PS4 11.00|0x08DA70|
pub unsafe fn run_with_context(
    config: Arc<Config>,
    arch: Arc<ArchConfig>,
    cpu: usize,
    td: Arc<Thread>,
    setup: fn() -> ContextSetup,
    main: fn() -> !,
) -> ! {
    // We use a different mechanism here. The Orbis put all of pcpu at a global level but we put it
    // on each CPU stack instead.
    //
    // Arc::into_raw intentionally leaks each object: since `main` never returns, the context (and
    // the raw pointers inside it) must stay valid for the rest of this CPU's lifetime. The
    // pointers are read back through BorrowedArc by the current_* accessors in this module.
    // `arch` is cloned because we still need it for the second argument of Context::new below.
    let mut cx = pin!(Context::new(
        Base {
            config: Arc::into_raw(config),
            arch: Arc::into_raw(arch.clone()),
            cpu,
            thread: Arc::into_raw(td),
            uma: null(),  // Not available until `setup` has run (current_uma returns None).
            pmgr: null(), // Not available until `setup` has run (current_procmgr returns None).
        },
        &arch,
    ));

    unsafe { cx.as_mut().activate() };

    // Prevent any code before and after this line to cross this line.
    core::sync::atomic::fence(Ordering::AcqRel);

    // Setup. The context is already activated here, so `setup` may call any function that
    // requires a CPU context (except current_uma/current_procmgr, which still yield None).
    let r = setup();

    // SAFETY: We did not move out the value.
    unsafe { cx.as_mut().get_unchecked_mut().base.uma = Arc::into_raw(r.uma) };
    unsafe { cx.as_mut().get_unchecked_mut().base.pmgr = Arc::into_raw(r.pmgr) };

    main();
}
72
/// # Interrupt safety
/// This function can be called from interrupt handler.
pub fn current_config() -> BorrowedArc<Config> {
    // It does not matter if we are on a different CPU after we load the Context::config because it
    // is always the same for all CPU.
    unsafe {
        BorrowedArc::from_non_null(Context::load_static_ptr::<{ offset_of!(Base, config) }, _>())
    }
}
82
83/// # Interrupt safety
84/// This function can be called from interrupt handler.
85pub fn current_arch() -> BorrowedArc<ArchConfig> {
86 // It does not matter if we are on a different CPU after we load the Context::arch because it is
87 // always the same for all CPU.
88 unsafe {
89 BorrowedArc::from_non_null(Context::load_static_ptr::<{ offset_of!(Base, arch) }, _>())
90 }
91}
92
93/// # Interrupt safety
94/// This function is interrupt safe.
95pub fn current_thread() -> BorrowedArc<Thread> {
96 // It does not matter if we are on a different CPU after we load the Context::thread because it
97 // is going to be the same one since it represent the current thread.
98 unsafe {
99 BorrowedArc::from_non_null(Context::load_static_ptr::<{ current_thread_offset() }, _>())
100 }
101}
102
/// Offset of the `thread` field within the per-CPU [`Base`] structure.
///
/// NOTE(review): exposed as a public const fn, presumably so low-level (e.g. assembly) code can
/// locate the current thread without calling [`current_thread()`] — confirm with callers.
pub const fn current_thread_offset() -> usize {
    offset_of!(Base, thread)
}
106
107/// Returns [`None`] if called from context setup function.
108///
109/// # Interrupt safety
110/// This function can be called from interrupt handler.
111pub fn current_uma() -> Option<BorrowedArc<Uma>> {
112 // It does not matter if we are on a different CPU after we load the Context::uma because it is
113 // always the same for all CPU.
114 unsafe { BorrowedArc::new(Context::load_ptr::<{ offset_of!(Base, uma) }, _>()) }
115}
116
117/// Returns [`None`] if called from context setup function.
118///
119/// # Interrupt safety
120/// This function can be called from interrupt handle.
121pub fn current_procmgr() -> Option<BorrowedArc<ProcMgr>> {
122 // It does not matter if we are on a different CPU after we load the Context::pmgr because it is
123 // always the same for all CPU.
124 unsafe { BorrowedArc::new(Context::load_ptr::<{ offset_of!(Base, pmgr) }, _>()) }
125}
126
/// Pin the calling thread to one CPU.
///
/// This thread will never switch to a different CPU until the returned [`PinnedContext`] is dropped
/// and it is not allowed to sleep.
///
/// See `critical_enter` and `critical_exit` on the PS4 for a reference. Beware that our
/// implementation is a bit different. The PS4 **allows the thread to sleep but we don't**.
pub fn pin_cpu() -> PinnedContext {
    let td = current_thread();

    // Prevent all operations after this to get executed before this line. See
    // https://github.com/rust-lang/rust/issues/130655#issuecomment-2365189317 for the explanation.
    // The matching fetch_sub with Release ordering happens when the PinnedContext is dropped.
    unsafe { td.active_pins().fetch_add(1, Ordering::Acquire) };

    PinnedContext {
        td,
        phantom: PhantomData,
    }
}
146
/// Output of the context setup function.
///
/// Both objects are installed into the per-CPU context by [`run_with_context()`] once the setup
/// function returns.
pub struct ContextSetup {
    // UMA instance, later returned by current_uma().
    pub uma: Arc<Uma>,
    // Process manager, later returned by current_procmgr().
    pub pmgr: Arc<ProcMgr>,
}
152
/// Implementation of `pcpu` structure.
///
/// Access to this structure must be done by **atomic reading or writing its field directly**. It is
/// not safe to have a temporary pointer or reference to this struct or its field because the CPU
/// might get interrupted, which means it is possible for the next instruction to get executed on
/// a different CPU if the interrupt causes the CPU to switch the task.
///
/// The activation of this struct is a minimum requirement for a new CPU to call most of the other
/// functions. The new CPU should call [`run_with_context()`] as soon as possible. We don't make the
/// functions that require this context `unsafe` nor make them check for the context because it
/// would be (almost) all of them. So we impose this requirement on a function that sets up a CPU
/// instead.
///
/// Beware of any type that implements [`Drop`] because it may access the CPU context. For maximum
/// safety the CPU setup function **must not cause any value of a kernel type to drop before the
/// context is activated**. It is safe to drop values of Rust core types (e.g. `String`) **only on a
/// main CPU** because the only kernel functions it can call into are either the stage 1 allocator
/// or the panic handler, neither of which requires a CPU context.
#[repr(C)]
struct Base {
    config: *const Config,     // Leaked Arc installed by run_with_context(); never null.
    arch: *const ArchConfig,   // Leaked Arc installed by run_with_context(); never null.
    cpu: usize,                // pc_cpuid
    thread: *const Thread,     // pc_curthread
    uma: *const Uma,           // Null until the context setup function has returned.
    pmgr: *const ProcMgr,      // Null until the context setup function has returned.
}
180
impl Drop for Base {
    // The per-CPU context is expected to live for the rest of the CPU's lifetime
    // (run_with_context never returns), so reaching this drop indicates a bug somewhere.
    fn drop(&mut self) {
        panic!("dropping Context can cause a bug so it is not supported");
    }
}
186
/// RAII struct to pin the current thread to a CPU.
///
/// Created by [`pin_cpu()`]; dropping it releases the pin.
///
/// This struct must not implement [`Send`] and [`Sync`].
pub struct PinnedContext {
    // Thread whose pin count was incremented by pin_cpu().
    td: BorrowedArc<Thread>,
    phantom: PhantomData<Rc<()>>, // Make sure we are !Send and !Sync.
}
194
impl PinnedContext {
    /// Returns the value of the `cpu` field (`pc_cpuid`) of the per-CPU [`Base`].
    ///
    /// See [`CpuLocal`] for a safe alternative if you want to store per-CPU value.
    ///
    /// # Safety
    /// Anything that derives from the returned value will be invalid when this [`PinnedContext`]
    /// is dropped.
    pub unsafe fn cpu(&self) -> usize {
        unsafe { Context::load_volatile_usize::<{ offset_of!(Base, cpu) }>() }
    }
}
205
impl Drop for PinnedContext {
    fn drop(&mut self) {
        // Prevent all operations before this to get executed after this line. See
        // https://github.com/rust-lang/rust/issues/130655#issuecomment-2365189317 for the explanation.
        // This Release decrement pairs with the Acquire increment performed by pin_cpu().
        unsafe { self.td.active_pins().fetch_sub(1, Ordering::Release) };

        // TODO: Implement td_owepreempt.
    }
}
214}