add os[1-8]-ref for os references, add guide, add README

This commit is contained in:
Yu Chen
2022-06-27 22:22:44 +08:00
parent 7c1679774c
commit d752a67137
360 changed files with 32863 additions and 1 deletions

View File

@@ -0,0 +1,33 @@
//! Implementation of [`TaskContext`]
use crate::trap::trap_return;
#[derive(Copy, Clone)]
#[repr(C)]
/// Task context saved and restored by `__switch` (see `switch.S`).
///
/// The `#[repr(C)]` layout is load-bearing: the assembly accesses the
/// fields by fixed offsets (0 = ra, 8 = sp, 16.. = s0..s11).
pub struct TaskContext {
    /// Ret position after task switching
    pub ra: usize,
    /// Stack pointer
    pub sp: usize,
    /// s0-11 register, callee saved
    pub s: [usize; 12],
}
impl TaskContext {
    /// An all-zero context. Used as a write-only placeholder (e.g. the
    /// final `__switch` of an exiting task, or the initial idle context).
    pub fn zero_init() -> Self {
        Self {
            ra: 0,
            sp: 0,
            s: [0; 12],
        }
    }
    /// Context for a freshly created task: the first switch-in "returns"
    /// into `trap_return` with `sp` set to the given kernel stack top.
    pub fn goto_trap_return(kstack_ptr: usize) -> Self {
        Self {
            ra: trap_return as usize,
            sp: kstack_ptr,
            s: [0; 12],
        }
    }
}

262
os8-ref/src/task/id.rs Normal file
View File

@@ -0,0 +1,262 @@
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
/// A reusable integer-id allocator: mints 0, 1, 2, … in order and hands
/// freed ids back out in LIFO order.
pub struct RecycleAllocator {
    current: usize,
    recycled: Vec<usize>,
}
impl RecycleAllocator {
    /// An empty allocator; the first `alloc` returns 0.
    pub fn new() -> Self {
        RecycleAllocator {
            current: 0,
            recycled: Vec::new(),
        }
    }
    /// Prefer the most recently freed id; otherwise mint a fresh one.
    pub fn alloc(&mut self) -> usize {
        match self.recycled.pop() {
            Some(id) => id,
            None => {
                let fresh = self.current;
                self.current += 1;
                fresh
            }
        }
    }
    /// Return `id` to the pool.
    ///
    /// Panics if `id` was never allocated or is already in the free
    /// list (double free).
    pub fn dealloc(&mut self, id: usize) {
        assert!(id < self.current);
        assert!(
            !self.recycled.contains(&id),
            "id {} has been deallocated!",
            id
        );
        self.recycled.push(id);
    }
}
lazy_static! {
    /// Global allocator for process ids.
    static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> =
        unsafe { UPSafeCell::new(RecycleAllocator::new()) };
    /// Global allocator for kernel-stack slot ids (independent of pids).
    static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> =
        unsafe { UPSafeCell::new(RecycleAllocator::new()) };
}
/// RAII handle for a process id; the pid is recycled on drop.
pub struct PidHandle(pub usize);
/// Allocate a process id from the global allocator.
pub fn pid_alloc() -> PidHandle {
    PidHandle(PID_ALLOCATOR.exclusive_access().alloc())
}
impl Drop for PidHandle {
    fn drop(&mut self) {
        // Hand the pid back so it can be reused.
        PID_ALLOCATOR.exclusive_access().dealloc(self.0);
    }
}
/// Return (bottom, top) of a kernel stack in kernel space.
///
/// Stacks are laid out downward from TRAMPOLINE; each slot occupies
/// KERNEL_STACK_SIZE bytes plus one PAGE_SIZE hole between stacks.
pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
    let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
    (top - KERNEL_STACK_SIZE, top)
}
/// RAII handle for a kernel stack. The wrapped value is the stack's
/// slot id from KSTACK_ALLOCATOR (not an address).
pub struct KernelStack(pub usize);
/// Allocate a kernel-stack slot and map its pages (R|W) in kernel space.
pub fn kstack_alloc() -> KernelStack {
    let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
    let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
    KERNEL_SPACE.exclusive_access().insert_framed_area(
        kstack_bottom.into(),
        kstack_top.into(),
        MapPermission::R | MapPermission::W,
    );
    KernelStack(kstack_id)
}
impl Drop for KernelStack {
    fn drop(&mut self) {
        // Unmap this stack's frames from kernel space.
        // NOTE(review): the slot id is not returned to KSTACK_ALLOCATOR
        // here, so slot ids are never reused — confirm intended.
        let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
        let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
        KERNEL_SPACE
            .exclusive_access()
            .remove_area_with_start_vpn(kernel_stack_bottom_va.into());
    }
}
impl KernelStack {
    /// Copy `value` onto the top of this stack and return a pointer to it.
    ///
    /// NOTE(review): the raw write assumes the stack top address is
    /// suitably aligned for `T` — confirm for types with alignment > 8.
    #[allow(unused)]
    pub fn push_on_top<T>(&self, value: T) -> *mut T
    where
        T: Sized,
    {
        let kernel_stack_top = self.get_top();
        let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
        unsafe {
            *ptr_mut = value;
        }
        ptr_mut
    }
    /// Highest address (exclusive) of this stack, derived from its slot id.
    pub fn get_top(&self) -> usize {
        let (_, kernel_stack_top) = kernel_stack_position(self.0);
        kernel_stack_top
    }
}
/// User-space resources owned by one thread: its tid plus the placement
/// of its user stack and trap context in the owning process's address space.
pub struct TaskUserRes {
    pub tid: usize,
    pub ustack_base: usize,
    /// Weak back-reference: the process owns its threads, not vice versa.
    pub process: Weak<ProcessControlBlock>,
}
/// Trap-context page of thread `tid`: one page per tid, laid out
/// downward from TRAP_CONTEXT.
fn trap_cx_bottom_from_tid(tid: usize) -> usize {
    TRAP_CONTEXT - tid * PAGE_SIZE
}
/// User-stack bottom of thread `tid`: stacks grow upward from
/// `ustack_base`, with a PAGE_SIZE hole between consecutive stacks.
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
    ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}
impl TaskUserRes {
    /// Reserve a tid in `process`; if `alloc_user_res`, also map the
    /// thread's user stack and trap-context page right away.
    pub fn new(
        process: Arc<ProcessControlBlock>,
        ustack_base: usize,
        alloc_user_res: bool,
    ) -> Self {
        let tid = process.inner_exclusive_access().alloc_tid();
        let task_user_res = Self {
            tid,
            ustack_base,
            process: Arc::downgrade(&process),
        };
        if alloc_user_res {
            task_user_res.alloc_user_res();
        }
        task_user_res
    }
    /// Map this thread's user stack (R|W|U) and trap-context page (R|W;
    /// no U — only the kernel touches it) into the process address space.
    pub fn alloc_user_res(&self) {
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        // alloc user stack
        let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
        let ustack_top = ustack_bottom + USER_STACK_SIZE;
        process_inner.memory_set.insert_framed_area(
            ustack_bottom.into(),
            ustack_top.into(),
            MapPermission::R | MapPermission::W | MapPermission::U,
        );
        // alloc trap_cx
        let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
        let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
        process_inner.memory_set.insert_framed_area(
            trap_cx_bottom.into(),
            trap_cx_top.into(),
            MapPermission::R | MapPermission::W,
        );
    }
    /// Unmap the user stack and trap-context page (inverse of
    /// `alloc_user_res`). The owning process must still be alive.
    fn dealloc_user_res(&self) {
        // dealloc tid
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        // dealloc ustack manually
        let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
        process_inner
            .memory_set
            .remove_area_with_start_vpn(ustack_bottom_va.into());
        // dealloc trap_cx manually
        let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
        process_inner
            .memory_set
            .remove_area_with_start_vpn(trap_cx_bottom_va.into());
    }
    /// Allocate a fresh tid from the process and store it in `self.tid`.
    #[allow(unused)]
    pub fn alloc_tid(&mut self) {
        self.tid = self
            .process
            .upgrade()
            .unwrap()
            .inner_exclusive_access()
            .alloc_tid();
    }
    /// Return this thread's tid to the process's allocator.
    pub fn dealloc_tid(&self) {
        let process = self.process.upgrade().unwrap();
        let mut process_inner = process.inner_exclusive_access();
        process_inner.dealloc_tid(self.tid);
    }
    /// User-space virtual address of this thread's trap context.
    pub fn trap_cx_user_va(&self) -> usize {
        trap_cx_bottom_from_tid(self.tid)
    }
    /// Physical page holding this thread's trap context, found by
    /// walking the process page table.
    pub fn trap_cx_ppn(&self) -> PhysPageNum {
        let process = self.process.upgrade().unwrap();
        let process_inner = process.inner_exclusive_access();
        let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
        process_inner
            .memory_set
            .translate(trap_cx_bottom_va.into())
            .unwrap()
            .ppn()
    }
    /// Base address of the process's user-stack region.
    pub fn ustack_base(&self) -> usize {
        self.ustack_base
    }
    /// Top (initial sp) of this thread's user stack.
    pub fn ustack_top(&self) -> usize {
        ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
    }
}
impl Drop for TaskUserRes {
    /// Recycle the tid, then unmap this thread's stack and trap context.
    fn drop(&mut self) {
        self.dealloc_tid();
        self.dealloc_user_res();
    }
}
use alloc::alloc::{alloc, dealloc, Layout};
/// Heap-allocated kernel-thread stack, identified by its bottom address.
///
/// NOTE(review): `Clone` is derived while `Drop` frees the buffer, so
/// dropping both a clone and its original would free the same allocation
/// twice — confirm clones are never dropped independently.
#[derive(Clone)]
pub struct KStack(usize);
/// Size (and alignment) of a kernel-thread stack: 32 KiB.
const STACK_SIZE: usize = 0x8000;
impl KStack {
    /// Allocate a STACK_SIZE buffer aligned to STACK_SIZE and record
    /// its bottom (lowest) address.
    pub fn new() -> KStack {
        let bottom =
            unsafe { alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize;
        KStack(bottom)
    }
    /// Highest address (exclusive) of the buffer.
    pub fn top(&self) -> usize {
        self.0 + STACK_SIZE
    }
}
use core::fmt::{self, Debug, Formatter};
impl Debug for KStack {
    /// Render as `KStack:<bottom address in hex>`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "KStack:{:#x}", self.0)
    }
}
impl Drop for KStack {
    fn drop(&mut self) {
        // SAFETY: self.0 came from `alloc` with this exact layout in
        // `KStack::new`, so it is valid to hand back to `dealloc`.
        unsafe {
            dealloc(
                self.0 as _,
                Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap(),
            );
        }
    }
}

View File

@@ -0,0 +1,76 @@
use super::suspend_current_and_run_next;
use crate::task::{add_task, schedule, TaskContext, TaskControlBlock};
use alloc::sync::Arc;
// NOTE: This module is not required to finish the lab5, though you may run
// kernel_stackless_coroutine_test() in kernel main() to see what happens
/// Spawn a kernel thread executing `f`: build a TCB for it and hand it
/// to the scheduler's ready queue.
#[no_mangle]
pub fn kthread_create(f: fn()) {
    println!("kthread_create");
    // create kernel thread
    let new_tcb = TaskControlBlock::create_kthread(f);
    let new_task = Arc::new(new_tcb);
    // add kernel thread into TASK_MANAGER
    add_task(Arc::clone(&new_task));
}
/// Demo: three stackful kernel threads. Thread 1 counts to completion
/// without yielding; threads 2 and 3 call `kthread_yield()` after each
/// count and so interleave. Each thread ends itself via `kthread_stop()`.
#[no_mangle]
pub fn kernel_stackful_coroutine_test() {
    println!("kernel_stackful_coroutine_test");
    kthread_create(|| {
        let id = 1;
        println!("kernel thread {:?} STARTING", id);
        for i in 0..10 {
            println!("kernel thread: {} counter: {}", id, i);
        }
        println!("kernel thread {:?} FINISHED", id);
        kthread_stop();
    });
    kthread_create(|| {
        let id = 2;
        println!("kernel thread {:?} STARTING", id);
        for i in 0..10 {
            println!("kernel thread: {} counter: {}", id, i);
            kthread_yield();
        }
        println!("kernel thread {:?} FINISHED", id);
        kthread_stop();
    });
    kthread_create(|| {
        let id = 3;
        println!("kernel thread {:?} STARTING", id);
        for i in 0..10 {
            println!("kernel thread: {} counter: {}", id, i);
            kthread_yield();
        }
        println!("kernel thread {:?} FINISHED", id);
        kthread_stop();
    });
}
/// Terminate the calling kernel thread (never returns).
pub fn kthread_stop() {
    do_exit();
}
/// Exit path for kernel threads; must not return.
#[no_mangle]
pub fn do_exit() {
    println!("kthread do exit");
    exit_kthread_and_run_next(0);
    panic!("Unreachable in sys_exit!");
}
/// Cooperatively give up the CPU; the thread stays ready to run.
pub fn kthread_yield() {
    suspend_current_and_run_next();
}
/// Switch away from the exiting kernel thread for good.
///
/// NOTE(review): the context saved through `_unused` is discarded and
/// the thread is simply never rescheduled — confirm its TCB/stack are
/// reclaimed elsewhere.
#[no_mangle]
pub fn exit_kthread_and_run_next(exit_code: i32) {
    println!("exit_kthread_and_run_next with code: {}", exit_code);
    // we do not have to save task context
    let mut _unused = TaskContext::zero_init();
    schedule(&mut _unused as *mut _);
}

View File

@@ -0,0 +1,46 @@
//! Implementation of [`TaskManager`]
//!
//! It is only used to manage processes and schedule process based on ready queue.
//! Other CPU process monitoring functions are in Processor.
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
/// FIFO ready queue of schedulable tasks (threads).
pub struct TaskManager {
    ready_queue: VecDeque<Arc<TaskControlBlock>>,
}
/// A simple FIFO scheduler.
impl TaskManager {
    /// An empty ready queue.
    pub fn new() -> Self {
        Self {
            ready_queue: VecDeque::new(),
        }
    }
    /// Add process back to ready queue
    pub fn add(&mut self, task: Arc<TaskControlBlock>) {
        self.ready_queue.push_back(task);
    }
    /// Take a process out of the ready queue
    pub fn fetch(&mut self) -> Option<Arc<TaskControlBlock>> {
        self.ready_queue.pop_front()
    }
}
lazy_static! {
    /// TASK_MANAGER instance through lazy_static!
    pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
        unsafe { UPSafeCell::new(TaskManager::new()) };
}
/// Enqueue `task` on the global ready queue.
pub fn add_task(task: Arc<TaskControlBlock>) {
    TASK_MANAGER.exclusive_access().add(task);
}
/// Dequeue the next ready task, if any.
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
    TASK_MANAGER.exclusive_access().fetch()
}

158
os8-ref/src/task/mod.rs Normal file
View File

@@ -0,0 +1,158 @@
//! Implementation of process management mechanism
//!
//! Here is the entry for process scheduling required by other modules
//! (such as syscall or clock interrupt).
//! By suspending or exiting the current process, you can
//! modify the process state, manage the process queue through TASK_MANAGER,
//! and switch the control flow through PROCESSOR.
//!
//! Be careful when you see [`__switch`]. Control flow around this function
//! might not be what you expect.
mod context;
mod id;
pub mod kthread;
mod manager;
mod process;
mod processor;
pub mod stackless_coroutine;
mod switch;
#[allow(clippy::module_inception)]
mod task;
pub use crate::syscall::process::TaskInfo;
use crate::{
fs::{open_file, OpenFlags},
task::id::TaskUserRes,
};
use alloc::{sync::Arc, vec::Vec};
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle};
pub use kthread::kernel_stackful_coroutine_test;
use lazy_static::*;
pub use manager::add_task;
use manager::fetch_task;
use process::ProcessControlBlock;
pub use processor::{
current_process, current_task, current_trap_cx, current_trap_cx_user_va, current_user_token,
run_tasks, schedule, take_current_task,
};
pub use stackless_coroutine::kernel_stackless_coroutine_test;
use switch::__switch;
pub use task::{TaskControlBlock, TaskStatus};
/// Mark the current task Blocking and switch away. Unlike
/// `suspend_current_and_run_next`, the task is NOT re-queued here;
/// whoever wakes it must add it back to the ready queue.
pub fn block_current_and_run_next() {
    let task = take_current_task().unwrap();
    let mut task_inner = task.inner_exclusive_access();
    let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
    task_inner.task_status = TaskStatus::Blocking;
    // Release the inner borrow before switching away.
    drop(task_inner);
    schedule(task_cx_ptr);
}
/// Make current task suspended and switch to the next task
pub fn suspend_current_and_run_next() {
    // There must be an application running.
    let task = take_current_task().unwrap();
    // ---- access current TCB exclusively
    let mut task_inner = task.inner_exclusive_access();
    let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
    // Change status to Ready
    task_inner.task_status = TaskStatus::Ready;
    drop(task_inner);
    // ---- release current PCB
    // push back to ready queue.
    // (task_cx_ptr stays valid: the ready queue keeps the TCB alive)
    add_task(task);
    // jump to scheduling cycle
    schedule(task_cx_ptr);
}
/// Exit current task, recycle process resources and switch to the next task
pub fn exit_current_and_run_next(exit_code: i32) {
    // take from Processor
    let task = take_current_task().unwrap();
    // **** access current TCB exclusively
    let mut task_inner = task.inner_exclusive_access();
    let process = task.process.upgrade().unwrap();
    let tid = task_inner.res.as_ref().unwrap().tid;
    // Record exit code
    task_inner.exit_code = Some(exit_code);
    // Dropping res unmaps this thread's ustack/trap_cx and frees its tid.
    task_inner.res = None;
    // here we do not remove the thread since we are still using the kstack
    // it will be deallocated when sys_waittid is called
    drop(task_inner);
    drop(task);
    // An exiting main thread (tid 0) tears the whole process down.
    if tid == 0 {
        let mut process_inner = process.inner_exclusive_access();
        // mark this process as a zombie process
        process_inner.is_zombie = true;
        // record exit code of main process
        process_inner.exit_code = exit_code;
        // do not move to its parent but under initproc
        // ++++++ access initproc PCB exclusively
        {
            let mut initproc_inner = INITPROC.inner_exclusive_access();
            for child in process_inner.children.iter() {
                child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
                initproc_inner.children.push(child.clone());
            }
        }
        let mut recycle_res = Vec::<TaskUserRes>::new();
        // deallocate user res (including tid/trap_cx/ustack) of all threads
        // it has to be done before we dealloc the whole memory_set
        // otherwise they will be deallocated twice
        for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
            let task = task.as_ref().unwrap();
            let mut task_inner = task.inner_exclusive_access();
            if let Some(res) = task_inner.res.take() {
                recycle_res.push(res);
            }
        }
        // TaskUserRes::drop re-borrows the process inner, so our borrow
        // must be released before clearing the collected resources.
        drop(process_inner);
        recycle_res.clear();
        let mut process_inner = process.inner_exclusive_access();
        process_inner.children.clear();
        // deallocate other data in user space i.e. program code/data section
        process_inner.memory_set.recycle_data_pages();
        // drop file descriptors
        process_inner.fd_table.clear();
    }
    // ++++++ release parent PCB
    drop(process);
    // we do not have to save task context
    let mut _unused = TaskContext::zero_init();
    schedule(&mut _unused as *mut _);
}
lazy_static! {
    /// Creation of initial process
    ///
    /// the name "initproc" may be changed to any other app name like "usertests",
    /// but we have user_shell, so we don't need to change it.
    pub static ref INITPROC: Arc<ProcessControlBlock> = {
        let inode = open_file("ch8b_initproc", OpenFlags::RDONLY).unwrap();
        let v = inode.read_all();
        ProcessControlBlock::new(v.as_slice())
    };
}
/// Force INITPROC's lazy initialization at boot (ProcessControlBlock::new
/// also enqueues its main thread on the scheduler).
pub fn add_initproc() {
    // INITPROC must be referenced at least once so that it can be initialized
    // through lazy_static
    let _initproc = INITPROC.clone();
}

280
os8-ref/src/task/process.rs Normal file
View File

@@ -0,0 +1,280 @@
use super::id::RecycleAllocator;
use super::{add_task, pid_alloc, PidHandle, TaskControlBlock};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPSafeCell};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
use core::cell::RefMut;
/// Process control block: immutable pid plus interior-mutable state.
pub struct ProcessControlBlock {
    // immutable
    pub pid: PidHandle,
    // mutable
    inner: UPSafeCell<ProcessControlBlockInner>,
}
// LAB5 HINT: you may add data structures for deadlock detection here
/// Mutable part of a PCB, guarded by `UPSafeCell`.
pub struct ProcessControlBlockInner {
    pub is_zombie: bool,
    /// The process's address space.
    pub memory_set: MemorySet,
    /// Weak parent link (None for initproc and kernel processes).
    pub parent: Option<Weak<ProcessControlBlock>>,
    pub children: Vec<Arc<ProcessControlBlock>>,
    pub exit_code: i32,
    /// File descriptor table; index = fd, None = free slot.
    pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
    /// Threads of this process, indexed by tid; None = vacated slot.
    pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
    /// Allocator for thread ids within this process.
    pub task_res_allocator: RecycleAllocator,
    pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
    pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
    pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
    /// Page-table token of this process's address space.
    #[allow(unused)]
    pub fn get_user_token(&self) -> usize {
        self.memory_set.token()
    }
    /// Lowest free file descriptor, growing the table by one slot when full.
    pub fn alloc_fd(&mut self) -> usize {
        match self.fd_table.iter().position(|slot| slot.is_none()) {
            Some(fd) => fd,
            None => {
                self.fd_table.push(None);
                self.fd_table.len() - 1
            }
        }
    }
    /// Allocate a thread id from the per-process allocator.
    pub fn alloc_tid(&mut self) -> usize {
        self.task_res_allocator.alloc()
    }
    /// Return a thread id to the per-process allocator.
    pub fn dealloc_tid(&mut self, tid: usize) {
        self.task_res_allocator.dealloc(tid)
    }
    /// Number of slots in the task table (vacated slots included).
    pub fn thread_count(&self) -> usize {
        self.tasks.len()
    }
    /// Cloned handle of the task at `tid`; panics if the slot is empty.
    pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
        self.tasks[tid].as_ref().unwrap().clone()
    }
}
impl ProcessControlBlock {
    /// Exclusive (panicking) access to the mutable inner state.
    pub fn inner_exclusive_access(&self) -> RefMut<'_, ProcessControlBlockInner> {
        self.inner.exclusive_access()
    }
    // LAB5 HINT: How to initialize deadlock data structures?
    /// Create a process from an ELF image: build its address space,
    /// spawn the main thread (tid 0) with a prepared trap context, and
    /// enqueue that thread on the scheduler.
    pub fn new(elf_data: &[u8]) -> Arc<Self> {
        // memory_set with elf program headers/trampoline/trap context/user stack
        let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
        // allocate a pid
        let pid_handle = pid_alloc();
        let process = Arc::new(Self {
            pid: pid_handle,
            inner: unsafe {
                UPSafeCell::new(ProcessControlBlockInner {
                    is_zombie: false,
                    memory_set,
                    parent: None,
                    children: Vec::new(),
                    exit_code: 0,
                    fd_table: vec![
                        // 0 -> stdin
                        Some(Arc::new(Stdin)),
                        // 1 -> stdout
                        Some(Arc::new(Stdout)),
                        // 2 -> stderr
                        Some(Arc::new(Stdout)),
                    ],
                    tasks: Vec::new(),
                    task_res_allocator: RecycleAllocator::new(),
                    mutex_list: Vec::new(),
                    semaphore_list: Vec::new(),
                    condvar_list: Vec::new(),
                })
            },
        });
        // create a main thread, we should allocate ustack and trap_cx here
        let task = Arc::new(TaskControlBlock::new(
            Arc::clone(&process),
            ustack_base,
            true,
        ));
        // prepare trap_cx of main thread
        let task_inner = task.inner_exclusive_access();
        let trap_cx = task_inner.get_trap_cx();
        let ustack_top = task_inner.res.as_ref().unwrap().ustack_top();
        let kernel_stack_top = task.kernel_stack.get_top();
        drop(task_inner);
        *trap_cx = TrapContext::app_init_context(
            entry_point,
            ustack_top,
            KERNEL_SPACE.exclusive_access().token(),
            kernel_stack_top,
            trap_handler as usize,
        );
        // add main thread to the process
        let mut process_inner = process.inner_exclusive_access();
        process_inner.tasks.push(Some(Arc::clone(&task)));
        drop(process_inner);
        // add main thread to scheduler
        add_task(task);
        process
    }
    // LAB5 HINT: How to initialize deadlock data structures?
    /// Load a new elf to replace the original application address space and start execution
    /// Only support processes with a single thread.
    pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
        assert_eq!(self.inner_exclusive_access().thread_count(), 1);
        // memory_set with elf program headers/trampoline/trap context/user stack
        let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
        let new_token = memory_set.token();
        // substitute memory_set
        self.inner_exclusive_access().memory_set = memory_set;
        // then we alloc user resource for main thread again
        // since memory_set has been changed
        let task = self.inner_exclusive_access().get_task(0);
        let mut task_inner = task.inner_exclusive_access();
        task_inner.res.as_mut().unwrap().ustack_base = ustack_base;
        task_inner.res.as_mut().unwrap().alloc_user_res();
        task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn();
        // push arguments on user stack: first an argv pointer array
        // (args.len() + 1 slots, NULL-terminated), then the strings
        // themselves, each NUL-terminated.
        let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top();
        user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
        let argv_base = user_sp;
        let mut argv: Vec<_> = (0..=args.len())
            .map(|arg| {
                translated_refmut(
                    new_token,
                    (argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
                )
            })
            .collect();
        *argv[args.len()] = 0;
        for i in 0..args.len() {
            user_sp -= args[i].len() + 1;
            *argv[i] = user_sp;
            let mut p = user_sp;
            for c in args[i].as_bytes() {
                *translated_refmut(new_token, p as *mut u8) = *c;
                p += 1;
            }
            *translated_refmut(new_token, p as *mut u8) = 0;
        }
        // make the user_sp aligned to 8B for k210 platform
        user_sp -= user_sp % core::mem::size_of::<usize>();
        // initialize trap_cx
        let mut trap_cx = TrapContext::app_init_context(
            entry_point,
            user_sp,
            KERNEL_SPACE.exclusive_access().token(),
            task.kernel_stack.get_top(),
            trap_handler as usize,
        );
        // a0 = argc, a1 = argv for the user entry point
        trap_cx.x[10] = args.len();
        trap_cx.x[11] = argv_base;
        *task_inner.get_trap_cx() = trap_cx;
    }
    // LAB5 HINT: How to initialize deadlock data structures?
    /// Fork from parent to child
    /// Only support processes with a single thread.
    pub fn fork(self: &Arc<Self>) -> Arc<Self> {
        let mut parent = self.inner_exclusive_access();
        assert_eq!(parent.thread_count(), 1);
        // clone parent's memory_set completely including trampoline/ustacks/trap_cxs
        let memory_set = MemorySet::from_existed_user(&parent.memory_set);
        // alloc a pid
        let pid = pid_alloc();
        // copy fd table (shared file objects, duplicated table)
        let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
        for fd in parent.fd_table.iter() {
            if let Some(file) = fd {
                new_fd_table.push(Some(file.clone()));
            } else {
                new_fd_table.push(None);
            }
        }
        // create child process pcb
        let child = Arc::new(Self {
            pid,
            inner: unsafe {
                UPSafeCell::new(ProcessControlBlockInner {
                    is_zombie: false,
                    memory_set,
                    parent: Some(Arc::downgrade(self)),
                    children: Vec::new(),
                    exit_code: 0,
                    fd_table: new_fd_table,
                    tasks: Vec::new(),
                    task_res_allocator: RecycleAllocator::new(),
                    mutex_list: Vec::new(),
                    semaphore_list: Vec::new(),
                    condvar_list: Vec::new(),
                })
            },
        });
        // add child
        parent.children.push(Arc::clone(&child));
        // create main thread of child process
        let task = Arc::new(TaskControlBlock::new(
            Arc::clone(&child),
            parent
                .get_task(0)
                .inner_exclusive_access()
                .res
                .as_ref()
                .unwrap()
                .ustack_base(),
            // here we do not allocate trap_cx or ustack again
            // but mention that we allocate a new kernel_stack here
            false,
        ));
        // attach task to child process
        let mut child_inner = child.inner_exclusive_access();
        child_inner.tasks.push(Some(Arc::clone(&task)));
        drop(child_inner);
        // modify kernel_stack_top in trap_cx of this thread
        let task_inner = task.inner_exclusive_access();
        let trap_cx = task_inner.get_trap_cx();
        trap_cx.kernel_sp = task.kernel_stack.get_top();
        drop(task_inner);
        // add this thread to scheduler
        add_task(task);
        child
    }
    /// This process's pid.
    pub fn getpid(&self) -> usize {
        self.pid.0
    }
    /// A bare PCB sharing the kernel address space, used as the owner
    /// of kernel threads. Note the empty fd table (no stdio).
    pub fn kernel_process() -> Arc<Self> {
        let memory_set = MemorySet::kernel_copy();
        let process = Arc::new(ProcessControlBlock {
            pid: super::pid_alloc(),
            inner: unsafe {
                UPSafeCell::new(ProcessControlBlockInner {
                    is_zombie: false,
                    memory_set: memory_set,
                    parent: None,
                    children: Vec::new(),
                    exit_code: 0,
                    fd_table: Vec::new(),
                    tasks: Vec::new(),
                    task_res_allocator: RecycleAllocator::new(),
                    mutex_list: Vec::new(),
                    semaphore_list: Vec::new(),
                    condvar_list: Vec::new(),
                })
            },
        });
        process
    }
}

View File

@@ -0,0 +1,121 @@
//! Implementation of [`Processor`] and Intersection of control flow
//!
//! Here, the continuous operation of user apps in CPU is maintained,
//! the current running state of CPU is recorded,
//! and the replacement and transfer of control flow of different applications are executed.
use super::__switch;
use super::process::ProcessControlBlock;
use super::{fetch_task, TaskStatus};
use super::{TaskContext, TaskControlBlock};
use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
/// Processor management structure (per-core scheduling state)
pub struct Processor {
    /// The task currently executing on the current processor
    current: Option<Arc<TaskControlBlock>>,
    /// The basic control flow of each core, helping to select and switch process
    idle_task_cx: TaskContext,
}
impl Processor {
    /// An idle processor: no current task, zeroed idle context.
    pub fn new() -> Self {
        Self {
            current: None,
            idle_task_cx: TaskContext::zero_init(),
        }
    }
    /// Raw pointer to the idle control-flow context, for `__switch`.
    fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
        &mut self.idle_task_cx as *mut _
    }
    /// Move the current task out, leaving `None` behind.
    pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
        self.current.take()
    }
    /// A cloned handle to the current task, if any.
    pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
        self.current.clone()
    }
}
lazy_static! {
    /// PROCESSOR instance through lazy_static!
    /// (one global Processor; UPSafeCell implies uniprocessor use)
    pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
/// The main part of process execution and scheduling
///
/// Loop fetch_task to get the process that needs to run,
/// and switch the process through __switch
pub fn run_tasks() {
    loop {
        let mut processor = PROCESSOR.exclusive_access();
        if let Some(task) = fetch_task() {
            let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
            // access coming task TCB exclusively
            let mut task_inner = task.inner_exclusive_access();
            let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
            task_inner.task_status = TaskStatus::Running;
            drop(task_inner);
            // release coming task TCB manually
            processor.current = Some(task);
            // release processor manually before switching away
            drop(processor);
            unsafe {
                __switch(idle_task_cx_ptr, next_task_cx_ptr);
            }
        } else {
            // NOTE(review): busy-spins (printing each iteration) while
            // the ready queue is empty.
            println!("no tasks available in run_tasks");
        }
    }
}
/// Get current task through take, leaving a None in its place
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
    PROCESSOR.exclusive_access().take_current()
}
/// Get a copy of the current task
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
    PROCESSOR.exclusive_access().current()
}
/// Process owning the current task; panics if no task is running or
/// its process has been dropped.
pub fn current_process() -> Arc<ProcessControlBlock> {
    current_task().unwrap().process.upgrade().unwrap()
}
/// Get token of the address space of current task
pub fn current_user_token() -> usize {
    let task = current_task().unwrap();
    task.get_user_token()
}
/// Get the mutable reference to trap context of current task
pub fn current_trap_cx() -> &'static mut TrapContext {
    current_task()
        .unwrap()
        .inner_exclusive_access()
        .get_trap_cx()
}
/// User-space virtual address of the current thread's trap context.
pub fn current_trap_cx_user_va() -> usize {
    current_task()
        .unwrap()
        .inner_exclusive_access()
        .res
        .as_ref()
        .unwrap()
        .trap_cx_user_va()
}
/// Return to idle control flow for new scheduling
///
/// Saves the outgoing context into `switched_task_cx_ptr` and resumes
/// the idle loop in `run_tasks`.
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
    let mut processor = PROCESSOR.exclusive_access();
    let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
    // Release the PROCESSOR borrow before __switch: control does not
    // return here until this task is scheduled again.
    drop(processor);
    unsafe {
        __switch(switched_task_cx_ptr, idle_task_cx_ptr);
    }
}

View File

@@ -0,0 +1,125 @@
// https://blog.aloni.org/posts/a-stack-less-rust-coroutine-100-loc/
// https://github.com/chyyuu/example-coroutine-and-thread/tree/stackless-coroutine-x86
// NOTE: This module is not required to finish the lab5, though you may run
// kernel_stackless_coroutine_test() in kernel main() to see what happens
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use core::task::{RawWaker, RawWakerVTable, Waker};
extern crate alloc;
use alloc::collections::VecDeque;
use alloc::boxed::Box;
/// Two-phase state of a demo coroutine; toggled on every poll.
enum State {
    Halted,
    Running,
}
/// Minimal task handle passed into each coroutine closure.
struct Task {
    state: State,
}
impl Task {
    /// A future that yields once (Pending) and completes on the next poll.
    fn waiter<'a>(&'a mut self) -> Waiter<'a> {
        Waiter { task: self }
    }
}
/// Future that flips `task.state` each time it is polled.
struct Waiter<'a> {
    task: &'a mut Task,
}
impl<'a> Future for Waiter<'a> {
    type Output = ();
    /// Toggle the task's state on each poll: a Running task parks itself
    /// (Halted, Pending); a Halted task resumes (Running, Ready).
    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Self::Output> {
        if let State::Running = self.task.state {
            self.task.state = State::Halted;
            Poll::Pending
        } else {
            self.task.state = State::Running;
            Poll::Ready(())
        }
    }
}
/// Round-robin executor over boxed, pinned futures.
struct Executor {
    tasks: VecDeque<Pin<Box<dyn Future<Output = ()>>>>,
}
impl Executor {
    /// An executor with no queued tasks.
    fn new() -> Self {
        Self {
            tasks: VecDeque::new(),
        }
    }
    /// Queue the future produced by `closure`, seeding it with a fresh
    /// `Task` in the Running state.
    fn push<C, F>(&mut self, closure: C)
    where
        F: Future<Output = ()> + 'static,
        C: FnOnce(Task) -> F,
    {
        let task = Task {
            state: State::Running,
        };
        self.tasks.push_back(Box::pin(closure(task)));
    }
    /// Poll tasks round-robin until all complete; pending tasks go to
    /// the back of the queue.
    fn run(&mut self) {
        let waker = create_waker();
        let mut context = Context::from_waker(&waker);
        while let Some(mut task) = self.tasks.pop_front() {
            if task.as_mut().poll(&mut context).is_pending() {
                self.tasks.push_back(task);
            }
        }
    }
}
/// A Waker whose wake operations are all no-ops; the executor polls in
/// a loop, so no real wakeups are needed.
pub fn create_waker() -> Waker {
    // Safety: The waker points to a vtable with functions that do nothing. Doing
    // nothing is memory-safe.
    unsafe { Waker::from_raw(RAW_WAKER) }
}
const RAW_WAKER: RawWaker = RawWaker::new(core::ptr::null(), &VTABLE);
const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
// No-op vtable entries: the data pointer is null and never dereferenced.
unsafe fn clone(_: *const ()) -> RawWaker {
    RAW_WAKER
}
unsafe fn wake(_: *const ()) {}
unsafe fn wake_by_ref(_: *const ()) {}
unsafe fn drop(_: *const ()) {}
/// Demo: drive three stackless coroutines to completion with the simple
/// round-robin executor; each task yields twice via `waiter().await`.
#[no_mangle]
pub fn kernel_stackless_coroutine_test() {
    println!("kernel stackless coroutine Begin..");
    let mut exec = Executor::new();
    println!("  Create futures");
    for instance in 1..=3 {
        exec.push(move |mut task| async move {
            println!("  Kernel Task {}: begin state", instance);
            task.waiter().await;
            println!("  Kernel Task {}: next state", instance);
            task.waiter().await;
            println!("  Kernel Task {}: end state", instance);
        });
    }
    println!("  Running");
    exec.run();
    println!("  Done");
    println!("kernel stackless coroutine PASSED");
}

34
os8-ref/src/task/switch.S Normal file
View File

@@ -0,0 +1,34 @@
.altmacro
# SAVE_SN n: store callee-saved s<n> into the current TaskContext (a0).
# TaskContext is repr(C): offset 0 = ra, 8 = sp, (n+2)*8 = s<n>.
.macro SAVE_SN n
    sd s\n, (\n+2)*8(a0)
.endm
# LOAD_SN n: load callee-saved s<n> from the next TaskContext (a1).
.macro LOAD_SN n
    ld s\n, (\n+2)*8(a1)
.endm
    .section .text
    .globl __switch
__switch:
    # __switch(
    #     current_task_cx_ptr: *mut TaskContext,
    #     next_task_cx_ptr: *const TaskContext
    # )
    # save kernel stack of current task
    sd sp, 8(a0)
    # save ra & s0~s11 of current execution
    sd ra, 0(a0)
    .set n, 0
    .rept 12
        SAVE_SN %n
        .set n, n + 1
    .endr
    # restore ra & s0~s11 of next execution
    ld ra, 0(a1)
    .set n, 0
    .rept 12
        LOAD_SN %n
        .set n, n + 1
    .endr
    # restore kernel stack of next task
    ld sp, 8(a1)
    # "return" into the next task's saved ra
    ret

View File

@@ -0,0 +1,16 @@
//! Rust wrapper around `__switch`.
//!
//! Switching to a different task's context happens here. The actual
//! implementation must not be in Rust and (essentially) has to be in assembly
//! language (Do you know why?), so this module really is just a wrapper around
//! `switch.S`.
core::arch::global_asm!(include_str!("switch.S"));
use super::TaskContext;
extern "C" {
    /// Switch to the context of `next_task_cx_ptr`, saving the current context
    /// in `current_task_cx_ptr`. Unsafe: both pointers must reference
    /// valid, live `TaskContext` values.
    pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext);
}

140
os8-ref/src/task/task.rs Normal file
View File

@@ -0,0 +1,140 @@
//! Types related to task management & Functions for completely changing TCB
use super::id::TaskUserRes;
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use alloc::sync::{Arc, Weak};
use core::cell::RefMut;
/// Task control block structure
///
/// Directly save the contents that will not change during running
pub struct TaskControlBlock {
    // immutable
    /// Owning process (weak: the process owns its tasks, not vice versa)
    pub process: Weak<ProcessControlBlock>,
    /// Kernel stack corresponding to TID
    pub kernel_stack: KernelStack,
    // mutable
    inner: UPSafeCell<TaskControlBlockInner>,
}
/// Structure containing more process content
///
/// Store the contents that will change during operation
/// and are wrapped by UPSafeCell to provide mutual exclusion
pub struct TaskControlBlockInner {
    /// The physical page number of the frame where the trap context is placed
    pub trap_cx_ppn: PhysPageNum,
    /// Save task context
    pub task_cx: TaskContext,
    /// Maintain the execution status of the current process
    pub task_status: TaskStatus,
    /// It is set when active exit or execution error occurs
    pub exit_code: Option<i32>,
    /// Tid and ustack will be deallocated when this goes None
    pub res: Option<TaskUserRes>,
}
/// Simple access to its internal fields
impl TaskControlBlockInner {
    /// Mutable reference to the trap context, reached through its
    /// physical page number.
    pub fn get_trap_cx(&self) -> &'static mut TrapContext {
        self.trap_cx_ppn.get_mut()
    }
    /// Current scheduling status (copy).
    #[allow(unused)]
    fn get_status(&self) -> TaskStatus {
        self.task_status
    }
}
impl TaskControlBlock {
    /// Build a user thread: reserve a tid in `process` (optionally
    /// mapping user stack and trap context too) and allocate a fresh
    /// kernel stack.
    pub fn new(
        process: Arc<ProcessControlBlock>,
        ustack_base: usize,
        alloc_user_res: bool,
    ) -> Self {
        let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
        let trap_cx_ppn = res.trap_cx_ppn();
        let kernel_stack = kstack_alloc();
        let kstack_top = kernel_stack.get_top();
        Self {
            process: Arc::downgrade(&process),
            kernel_stack,
            inner: unsafe {
                UPSafeCell::new(TaskControlBlockInner {
                    res: Some(res),
                    trap_cx_ppn,
                    // first switch-in "returns" into trap_return
                    task_cx: TaskContext::goto_trap_return(kstack_top),
                    task_status: TaskStatus::Ready,
                    exit_code: None,
                })
            },
        }
    }
    /// Get the mutex to get the RefMut TaskControlBlockInner
    pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
        let inner = self.inner.exclusive_access();
        inner
    }
    /// Page-table token of the owning process's address space.
    pub fn get_user_token(&self) -> usize {
        let process = self.process.upgrade().unwrap();
        let inner = process.inner_exclusive_access();
        inner.memory_set.token()
    }
    /// Build a kernel thread whose first switch-in jumps straight to `f`
    /// on a heap-allocated stack.
    pub fn create_kthread(f: fn()) -> Self {
        use crate::mm::PhysAddr;
        let process = ProcessControlBlock::kernel_process();
        let process = Arc::downgrade(&process);
        let kernelstack = crate::task::id::KStack::new();
        let kstack_top = kernelstack.top();
        let mut context = TaskContext::zero_init();
        // NOTE(review): context_ppn is the page of this *stack local*,
        // which is stale once this function returns; it appears unused
        // for kthreads (they never trap_return) — confirm.
        let context_addr = &context as *const TaskContext as usize;
        let pa = PhysAddr::from(context_addr);
        let context_ppn = pa.floor();
        context.ra = f as usize;
        context.sp = kstack_top;
        Self {
            process,
            // NOTE(review): elsewhere KernelStack wraps an allocator
            // slot id, but here it wraps kstack_top (an address), which
            // its Drop will feed to kernel_stack_position(). Also the
            // `kernelstack` buffer itself is dropped (freed) when this
            // function returns while the thread still uses it — confirm.
            kernel_stack: KernelStack(kstack_top),
            inner: unsafe {
                UPSafeCell::new(TaskControlBlockInner {
                    res: None,
                    trap_cx_ppn: context_ppn,
                    task_cx: context,
                    task_status: TaskStatus::Ready,
                    exit_code: None,
                })
            },
        }
    }
}
#[derive(Copy, Clone, PartialEq)]
/// task status: UnInit, Ready, Running, Blocking
pub enum TaskStatus {
    /// Created but not yet initialized
    UnInit,
    /// Waiting in the ready queue
    Ready,
    /// Currently executing on the processor
    Running,
    /// Switched out and not on the ready queue; must be woken explicitly
    Blocking,
}