init 2023OSCOMP

This commit is contained in:
Yu Chen
2023-03-19 10:52:36 +08:00
parent 717259bb74
commit d99bbaca5f
779 changed files with 218 additions and 52440 deletions

View File

@@ -1,83 +0,0 @@
# syntax=docker/dockerfile:1
# This Dockerfile is adapted from https://github.com/LearningOS/rCore-Tutorial-v3/blob/main/Dockerfile
# with the following major updates:
# - ubuntu 18.04 -> 20.04
# - qemu 5.0.0 -> 7.0.0
# - Extensive comments linking to relevant documentation
FROM ubuntu:20.04
ARG QEMU_VERSION=7.0.0
ARG HOME=/root
# 0. Install general tools
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y \
curl \
git \
python3 \
wget
# 1. Set up QEMU RISC-V
# - https://learningos.github.io/rust-based-os-comp2022/0setup-devel-env.html#qemu
# - https://www.qemu.org/download/
# - https://wiki.qemu.org/Documentation/Platforms/RISCV
# - https://risc-v-getting-started-guide.readthedocs.io/en/latest/linux-qemu.html
# 1.1. Download source
WORKDIR ${HOME}
RUN wget https://download.qemu.org/qemu-${QEMU_VERSION}.tar.xz && \
tar xvJf qemu-${QEMU_VERSION}.tar.xz
# 1.2. Install dependencies
# - https://risc-v-getting-started-guide.readthedocs.io/en/latest/linux-qemu.html#prerequisites
RUN apt-get install -y \
autoconf automake autotools-dev curl libmpc-dev libmpfr-dev libgmp-dev \
gawk build-essential bison flex texinfo gperf libtool patchutils bc \
zlib1g-dev libexpat-dev git \
ninja-build pkg-config libglib2.0-dev libpixman-1-dev
# 1.3. Build and install from source
WORKDIR ${HOME}/qemu-${QEMU_VERSION}
RUN ./configure --target-list=riscv64-softmmu,riscv64-linux-user && \
make -j$(nproc) && \
make install
# 1.4. Clean up
WORKDIR ${HOME}
RUN rm -rf qemu-${QEMU_VERSION} qemu-${QEMU_VERSION}.tar.xz
# 1.5. Sanity checking
RUN qemu-system-riscv64 --version && \
qemu-riscv64 --version
# 2. Set up Rust
# - https://learningos.github.io/rust-based-os-comp2022/0setup-devel-env.html#qemu
# - https://www.rust-lang.org/tools/install
# - https://github.com/rust-lang/docker-rust/blob/master/Dockerfile-debian.template
# 2.1. Install
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
RUST_VERSION=nightly
RUN set -eux; \
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs -o rustup-init; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME;
# 2.2. Sanity checking
RUN rustup --version && \
cargo --version && \
rustc --version
# 3. Build env for labs
# See os1/Makefile `env:` for example.
# This avoids having to wait for these steps each time using a new container.
RUN rustup target add riscv64gc-unknown-none-elf && \
cargo install cargo-binutils
# Ready to go
WORKDIR ${HOME}

View File

@@ -1,20 +1,25 @@
# Open-Source OS Training Comp 2022
# Open-Source OS Training Comp 2023
Welcome to Open-Source OS Training Comp 2022欢迎加入2022 年开源操作系统训练营)
Welcome to Open-Source OS Training Comp 2023欢迎加入 2023 年开源操作系统训练营)
## 重要信息
- **2022.11.012022 秋冬季训练营启动交流会会议时间2022/11/01 20:00-21:00 #腾讯会议号838-197-763。请报名的同学参加。**
- [**Stage1 SCHEDULING**](./scheduling.md) & [**Stage2 SCHEDULING**](https://github.com/LearningOS/oscomp-kernel-training)2022 年开源操作系统训练营的第一/二阶段安排,可根据这些阶段安排的信息进行自学和自我训练)
- [**News**](./news.md)2022 年开源操作系统训练营的新闻和纪要)
- 2023.04.01: 2023 年春夏季开源操作系统训练营正式启动。
- 2023.03.19: 2023 年春夏季开源操作系统训练营启动&报名交流会会议时间2023/03/25 11:00-12:00 #腾讯会议号231-190-126 会议密码0325。请报名的同学参加。
- [**第一阶段训练安排**](./scheduling-1.md)
- [**第二阶段训练安排**](./scheduling-2.md)
- [**(可选)竞赛级训练安排**](https://github.com/LearningOS/oscomp-kernel-training)
- [**News**](./news.md)2023 年开源操作系统训练营的新闻和纪要)
- [**QA**](./QA.md)(常见问题解答)
- **Online Ranking**(训练营在线排行榜)
- [第一阶段排行Rust Lang & rCore Kernel](https://learningos.github.io/classroom-grading/)
- [第二阶段排行OS Kernel Implementation in OSCOMP2022](https://os-autograding.github.io/classroom-grading-template/)
- [第二阶段排行OS Kernel Implementation in OSCOMP2022](https://os-autograding.github.io/classroom-grading-template/)
- [**Learning Resource**](./relatedinfo.md)(训练营学习资源)
## 历史
##
## 历史
- [open-source os training comp 2022](https://github.com/LearningOS/rust-based-os-comp2023/tree/comp2022)
- [open-source os training comp 2021](https://github.com/rcore-os/rCore/wiki/os-tutorial-summer-of-code-2021)
- [open-source os training comp 2020](https://github.com/rcore-os/rCore/wiki/os-tutorial-summer-of-code-2020)
@@ -22,9 +27,9 @@ Welcome to Open-Source OS Training Comp 2022欢迎加入2022 年开源操作
- 李明 微信idlimingth
## 助教
- 唐洪雨 微信idthy1037
- 刘逸珑 微信idonedragon424
- 陈文杰 微信idtor4zS6
- 田凯夫 微信idtkf15944806118
- 闭浩扬 微信idscpointer
- 杨金博 微信idyangjinbo2000
## 技术指导委员会
@@ -59,4 +64,4 @@ Welcome to Open-Source OS Training Comp 2022欢迎加入2022 年开源操作
- 华为
- 智谱 ai
- 101 计划操作系统课程虚拟教研室
- [上海大学开源社区](https://github.com/shuosc/)
- [上海大学开源社区](https://github.com/shuosc/)

Binary file not shown.

6
ci-user/.gitignore vendored
View File

@@ -1,6 +0,0 @@
user/target
user/build
user/Cargo.lock
user/.idea
.idea
__pycache__

View File

@@ -1,61 +0,0 @@
default:
image: zhanghx0905/rcore-ci
stages:
- build
build-ch2-job:
stage: build
script:
- cd user && make all CHAPTER=2
artifacts:
paths:
- user/build/bin/*.bin
build-ch3_0-job:
stage: build
script:
- cd user && make all CHAPTER=3_0
artifacts:
paths:
- user/build/bin/*.bin
build-ch3_1-job:
stage: build
script:
- cd user && make all CHAPTER=3_1
artifacts:
paths:
- user/build/bin/*.bin
build-ch4-job:
stage: build
script:
- cd user && make all CHAPTER=4
artifacts:
paths:
- user/build/elf/*.elf
build-ch5-job:
stage: build
script:
- cd user && make all CHAPTER=5
artifacts:
paths:
- user/build/elf/*.elf
build-ch6-job:
stage: build
script:
- cd user && make all CHAPTER=6
artifacts:
paths:
- user/build/elf/*.elf
build-ch7-job:
stage: build
script:
- cd user && make all CHAPTER=7
artifacts:
paths:
- user/build/elf/*.elf

View File

@@ -1,48 +0,0 @@
RAND := $(shell awk 'BEGIN{srand();printf("%d", 65536*rand())}')
CHAPTER ?=
ifeq ($(CHAPTER), 3)
LAB := 1
else ifeq ($(CHAPTER), 4)
LAB := 2
else ifeq ($(CHAPTER), 5)
INITPROC := 1
LAB := 3
else ifeq ($(CHAPTER), 6)
INITPROC := 1
LAB := 4
else ifeq ($(CHAPTER), 7)
INITPROC := 1
LAB := 4
else ifeq ($(CHAPTER), 8)
INITPROC := 1
LAB := 5
endif
randomize:
find user/src/bin -name "*.rs" | xargs perl -pi -e s,OK,OK$(RAND),g
find user/src/bin -name "*.rs" | xargs perl -pi -e s,passed,passed$(RAND),g
find check -name "*.py" | xargs perl -pi -e s,OK,OK$(RAND),g
find check -name "*.py" | xargs perl -pi -e s,passed,passed$(RAND),g
test: randomize
python3 overwrite.py $(CHAPTER)
make -C user build BASE=2 TEST=$(CHAPTER) CHAPTER=$(CHAPTER)
ifdef INITPROC
cp -f user/build/elf/ch$(CHAPTER)_usertest.elf user/build/elf/ch$(CHAPTER)b_initproc.elf
endif
make -C ../os run | tee stdout-ch$(CHAPTER)
python3 check/ch$(CHAPTER).py < stdout-ch$(CHAPTER)
ifdef LAB
@for i in $(shell seq $(LAB)); do \
if ! [ -f ../reports/lab$$i.pdf -o -f ../reports/lab$$i.md ]; then \
echo "Report for lab$$i needed. Add your report to reports/lab$$i.pdf or reports/lab$$i.md" ; \
exit 1 ; \
else \
echo "Report for lab$$i found." ; \
fi; \
done
endif
.PHONY: test randomize

View File

@@ -1,67 +0,0 @@
## rCore_tutorial_v3 TESTS
本项目用于为 rCore 实验进行 CI 测试,在 user 目录下 `make all CHAPTER=x` 可获得第 x 章的测例。
- 可选项 2, 3_0, 3_2, 4, 5, 6, 7。
**重要**-加载地址更新:
- chapter2 所有程序加载位置位于 0x80400000与示例代码一致。
- chapter3 测试程序分为 3 批,每一批的地址都为 0x80400000 + id\*0x20000id 为程序在这一批中的序号。每一批都与参考代码一致,请分别测试。
- chapter4-7 所有程序加载位置位于 0x0与示例代码一致。
可以在 `user/build/asm` 目录下查看汇编来确认加载地址。
**测例更新**
- 一部分无用测例已删除,包括 ch2_helloworld, ch3_1_yield 等。
- sleep 测例被转移到第四章
- ch4 之后不再测试 write1
rust 的把user测例分散到了各个branch里当时想的是尽量把测试的过程屏蔽掉现在看确实不便于管理这学期就先这样算了.
### 各章的测例
#### ch3
test1write0 write1
test2setprio
test3stride的六个测例
#### ch4
test1sleep0 sleep1 测试 sys_time
test2map0123 unmap12 测试 map unmap 实现
#### ch5 6 7 8
```rust
"test_sleep\0",
"test_sleep1\0",
"test_mmap0\0",
"test_mmap1\0",
"test_mmap2\0",
"test_mmap3\0",
"test_unmap\0",
"test_unmap2\0",
"test_spawn0\0",
"test_spawn1\0",
// ch6
"test_mail0\0",
"test_mail1\0",
"test_mail2\0",
"test_mail3\0",
// ch7
"test_file0\0",
"test_file1\0",
"test_file2\0",
// ch8
...
```
share mem 的测例放着就行。
ch8 先不管。

View File

@@ -1,46 +0,0 @@
import sys
import re
import ch5_1
def test(expected, not_expected=None):
    """Check captured kernel stdout against regex patterns.

    Reads up to 1 MB from stdin, verifies that every pattern in
    `expected` matches somewhere in the output and that no pattern in
    `not_expected` does.  Prints a colored PASS/FAIL line per pattern,
    then asserts that all checks passed.

    Finally, if the stride-scheduler ratio pattern (ch5_1.PATTERN)
    appears in the output, runs the stride fairness check on all
    captured ratio values.
    """
    # Use None instead of a mutable default list: a `[]` default is
    # shared across calls and is a classic Python pitfall.
    if not_expected is None:
        not_expected = []
    output = sys.stdin.read(1000000)
    count = 0
    total = len(expected) + len(not_expected)
    for pattern in expected:
        if re.search(pattern, output):
            count += 1
            print(f'\033[92m[PASS]\033[0m found <{pattern}>')
        else:
            print(f'\033[91m[FAIL]\033[0m not found <{pattern}>')
    for pattern in not_expected:
        if not re.search(pattern, output):
            count += 1
            print(f'\033[92m[PASS]\033[0m not found <{pattern}>')
        else:
            print(f'\033[91m[FAIL]\033[0m found <{pattern}>')
    print('\nTest passed: %d/%d' % (count, total))
    assert count == total
    # test stride
    if re.search(ch5_1.PATTERN, output):
        ch5_1.stride_test(re.compile(ch5_1.PATTERN).findall(output))
# def test_str(expected):
# output = sys.stdin.read(1000000)
# count = 0
# total = len(expected)
# for pattern in expected:
# if output.find(pattern) != -1:
# count += 1
# print('\033[92m[PASS]\033[0m', pattern)
# else:
# print('\033[91m[FAIL]\033[0m', pattern)
# print('\nTest passed: %d/%d' % (count, total))
# assert count == total

View File

@@ -1,14 +0,0 @@
import base
EXPECTED = [
"Hello, world!",
]
TEMP = []
NOT_EXPECTED = [
"FAIL: T.T",
]
if __name__ == "__main__":
base.test(EXPECTED + TEMP, NOT_EXPECTED)

View File

@@ -1,17 +0,0 @@
import base
EXPECTED = [
"Hello, world from user mode program!",
"Test power_3 OK!",
"Test power_5 OK!",
"Test power_7 OK!",
]
TEMP = []
NOT_EXPECTED = [
"FAIL: T.T",
]
if __name__ == "__main__":
base.test(EXPECTED + TEMP, NOT_EXPECTED)

View File

@@ -1,21 +0,0 @@
"""Expected-output patterns for the chapter-3 CI check."""
import base
from ch2 import EXPECTED, NOT_EXPECTED

# Build this chapter's list by concatenation rather than `+=`:
# `+=` on a list imported with `from ch2 import EXPECTED` mutates
# ch2.EXPECTED in place, leaking ch3 patterns into any other module
# that also imports ch2.
EXPECTED = EXPECTED + [
    r"get_time OK! (\d+)",
    "Test sleep OK!",
    r"current time_msec = (\d+)",
    r"time_msec = (\d+) after sleeping (\d+) ticks, delta = (\d+)ms!",
    "Test sleep1 passed!",
    "Test write A OK!",
    "Test write B OK!",
    "Test write C OK!",
    # lab patterns: task-info syscall output
    "string from task info test",
    "Test task info OK!",
]

if __name__ == "__main__":
    base.test(EXPECTED, NOT_EXPECTED)

View File

@@ -1,18 +0,0 @@
import base
from ch3 import EXPECTED, NOT_EXPECTED
EXPECTED += [
"Test 04_1 OK!",
"Test 04_4 test OK!",
"Test 04_5 ummap OK!",
"Test 04_6 ummap2 OK!",
]
NOT_EXPECTED += [
"Should cause error, Test 04_2 fail!",
"Should cause error, Test 04_3 fail!",
]
if __name__ == "__main__":
base.test(EXPECTED, NOT_EXPECTED)

View File

@@ -1,22 +0,0 @@
import base
from ch4 import EXPECTED, NOT_EXPECTED
EXPECTED += [
r"Test getpid OK! pid = (\d+)",
"Test spawn0 OK!",
"Test wait OK!",
"Test waitpid OK!",
"Test set_priority OK!",
]
EXPECTED = list(set(EXPECTED) - set([
"string from task info test",
"Test task info OK!",
]))
TEMP = [
# "ch5 Usertests passed!",
]
if __name__ == '__main__':
base.test(EXPECTED + TEMP, NOT_EXPECTED)

View File

@@ -1,15 +0,0 @@
# Regex capturing the stride-scheduler ratio values printed by the kernel.
PATTERN = r"ratio = (\d+)"


def stride_test(result):
    """Verify stride-scheduler fairness from six captured ratio strings.

    `result` must hold exactly six captures of PATTERN; the check passes
    when the largest ratio is less than 1.5x the smallest.
    """
    assert len(result) == 6
    ratios = [int(r) for r in result]
    print('\nstride ratio =', ratios)
    fair = max(ratios) / min(ratios) < 1.5
    if fair:
        print('\033[92m[PASS]\033[0m Stride Test')
        print('\nTest passed: 1/1')
    else:
        print('\033[91m[FAIL]\033[0m Stride Test')
        print('\nTest passed: 0/1')
    assert fair

View File

@@ -1,20 +0,0 @@
import base
from ch5 import EXPECTED, NOT_EXPECTED
EXPECTED += [
"Test file0 OK!",
"Test fstat OK!",
"Test link OK!",
"Test mass open/unlink OK!"
]
EXPECTED = list(set(EXPECTED) - set([
"Test set_priority OK!"
]))
TEMP = [
# "ch6 Usertests passed!",
]
if __name__ == '__main__':
base.test(EXPECTED + TEMP, NOT_EXPECTED)

View File

@@ -1,4 +0,0 @@
import base
if __name__ == '__main__':
base.test([], [])

View File

@@ -1,42 +0,0 @@
import base
from ch5 import NOT_EXPECTED
EXPECTED = [
# ch2b
"Hello, world from user mode program!",
"Test power_3 OK!",
"Test power_5 OK!",
"Test power_7 OK!",
# ch3b
r"get_time OK! (\d+)",
"Test sleep OK!",
r"current time_msec = (\d+)",
r"time_msec = (\d+) after sleeping (\d+) ticks, delta = (\d+)ms!",
"Test sleep1 passed!",
"Test write A OK!",
"Test write B OK!",
"Test write C OK!",
# ch5b
"forktest2 test passed!",
# ch6b
"file_test passed!",
# ch7b
"pipetest passed!",
# ch8b
"mpsc_sem passed!",
"philosopher dining problem with mutex test passed!",
"race adder using spin mutex test passed!",
"sync_sem passed!",
"test_condvar passed!",
"threads with arg test passed!",
"threads test passed!",
# ch8
"deadlock test mutex 1 OK!",
"deadlock test semaphore 1 OK!",
"deadlock test semaphore 2 OK!",
"ch8 Usertests passed!",
]
if __name__ == "__main__":
base.test(EXPECTED, NOT_EXPECTED)

View File

@@ -1,28 +0,0 @@
# Prepare the os/ crate for CI testing of a given chapter: copy in the
# matching build.rs and Makefile, then patch os/Cargo.toml so the
# `riscv` dependency resolves to the local ci-user copy instead of the
# upstream git repository.
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("chapter", type=int)  # chapter number under test
chapter = parser.parse_args().chapter
# build.rs flavor: ch4-5 embed .elf apps, ch2-3 embed raw .bin images.
# NOTE(review): chapters >5 fall through and keep the existing build.rs —
# presumably they load apps from the fs image instead; confirm.
if 5 >= chapter >= 4:
    os.system("cp overwrite/build-elf.rs ../os/build.rs")
elif chapter < 4:
    os.system("cp overwrite/build-bin.rs ../os/build.rs")
# Makefile flavor: ch6+ additionally builds/attaches the easy-fs image,
# and ch7+ uses the larger-capacity fuse packer.
if chapter <= 5:
    os.system("cp overwrite/Makefile-ch3 ../os/Makefile")
elif chapter <= 6:
    os.system("cp overwrite/Makefile-ch6 ../os/Makefile")
    os.system("cp overwrite/easy-fs-fuse.rs ../easy-fs-fuse/src/main.rs")
elif chapter <= 8:
    os.system("cp overwrite/Makefile-ch6 ../os/Makefile")
    os.system("cp overwrite/easy-fs-fuse-ch7.rs ../easy-fs-fuse/src/main.rs")
# Rewrite the riscv dependency line in os/Cargo.toml to the local path.
lines = []
with open("../os/Cargo.toml", 'r') as f:
    for line in f.readlines():
        processed = line.replace(' git = "https://github.com/rcore-os/riscv"', ' path = "../ci-user/riscv" ')
        lines.append(processed)
with open("../os/Cargo.toml", 'w+') as f:
    f.writelines(lines)

View File

@@ -1,24 +0,0 @@
# Building
TARGET := riscv64gc-unknown-none-elf
MODE := release
KERNEL_ELF := target/$(TARGET)/$(MODE)/os
# BOARD
BOARD ?= qemu
SBI ?= rustsbi
BOOTLOADER := ../bootloader/$(SBI)-$(BOARD).bin
kernel:
cargo build --release
clean:
cargo clean
run: kernel
timeout --foreground 30s qemu-system-riscv64 \
-machine virt \
-nographic \
-bios $(BOOTLOADER) \
-kernel $(KERNEL_ELF)
.PHONY: build kernel clean run

View File

@@ -1,24 +0,0 @@
# Building
TARGET := riscv64gc-unknown-none-elf
MODE := release
KERNEL_ELF := target/$(TARGET)/$(MODE)/os
# BOARD
BOARD ?= qemu
SBI ?= rustsbi
BOOTLOADER := ../bootloader/$(SBI)-$(BOARD).bin
kernel:
cargo build --release
clean:
cargo clean
run: kernel
timeout --foreground 40s qemu-system-riscv64 \
-machine virt \
-nographic \
-bios $(BOOTLOADER) \
-kernel $(KERNEL_ELF)
.PHONY: build kernel clean run

View File

@@ -1,31 +0,0 @@
# Building
TARGET := riscv64gc-unknown-none-elf
MODE := release
KERNEL_ELF := target/$(TARGET)/$(MODE)/os
FS_IMG := ../ci-user/user/build/fs.img
# BOARD
BOARD ?= qemu
SBI ?= rustsbi
BOOTLOADER := ../bootloader/$(SBI)-$(BOARD).bin
fsimg:
cd ../easy-fs-fuse && cargo run --release -- \
-s ../ci-user/user/build/elf \
-o $(FS_IMG)
kernel: fsimg
cargo build --release
clean:
cargo clean
run: kernel
timeout --foreground 30s qemu-system-riscv64 \
-machine virt \
-nographic \
-bios $(BOOTLOADER) \
-kernel $(KERNEL_ELF) \
-drive file=$(FS_IMG),if=none,format=raw,id=x0 \
-device virtio-blk-device,drive=x0,bus=virtio-mmio-bus.0
.PHONY: build kernel clean run

View File

@@ -1,56 +0,0 @@
use std::io::{Result, Write};
use std::fs::{File, read_dir};
/// Cargo build script entry: regenerate `src/link_app.S` so the kernel
/// links in every user app binary.
fn main() {
    // Re-run this script whenever user sources or built binaries change.
    println!("cargo:rerun-if-changed=../ci-user/user/src/");
    println!("cargo:rerun-if-changed={}", TARGET_PATH);
    insert_app_data().unwrap();
}
/// Directory holding the freshly built user app binaries (`.bin`).
static TARGET_PATH: &str = "../ci-user/user/build/bin/";

/// Generate `src/link_app.S`, embedding every user app binary found in
/// `TARGET_PATH` into the kernel image: a `_num_app` count plus
/// start/end address table, an `_app_names` string table, and one
/// `.incbin` data section per app.
fn insert_app_data() -> Result<()> {
    let mut f = File::create("src/link_app.S")?;
    // Collect app names (extension stripped) from the build directory.
    // Reuse TARGET_PATH instead of repeating the path literal, so the
    // scanned directory and the .incbin path cannot drift apart.
    let mut apps: Vec<_> = read_dir(TARGET_PATH)
        .unwrap()
        .map(|dir_entry| {
            let mut name = dir_entry.unwrap().file_name().into_string().unwrap();
            // Drop the extension; tolerate extension-less names instead
            // of panicking on `find('.').unwrap()`.
            if let Some(dot) = name.find('.') {
                name.truncate(dot);
            }
            name
        })
        .collect();
    // Sort so app indices are deterministic across builds.
    apps.sort();
    writeln!(
        f,
        r#"
    .align 3
    .section .data
    .global _num_app
_num_app:
    .quad {}"#,
        apps.len()
    )?;
    for i in 0..apps.len() {
        writeln!(f, r#"    .quad app_{}_start"#, i)?;
    }
    // End marker of the last app (assumes at least one app was built).
    writeln!(f, r#"    .quad app_{}_end"#, apps.len() - 1)?;
    writeln!(
        f,
        r#"
    .global _app_names
_app_names:"#
    )?;
    for app in apps.iter() {
        writeln!(f, r#"    .string "{}""#, app)?;
    }
    for (idx, app) in apps.iter().enumerate() {
        println!("app_{}: {}", idx, app);
        writeln!(
            f,
            r#"
    .section .data
    .global app_{0}_start
    .global app_{0}_end
    .align 3
app_{0}_start:
    .incbin "{2}{1}.bin"
app_{0}_end:"#,
            idx, app, TARGET_PATH
        )?;
    }
    Ok(())
}

View File

@@ -1,56 +0,0 @@
use std::io::{Result, Write};
use std::fs::{File, read_dir};
/// Cargo build script entry: regenerate `src/link_app.S` so the kernel
/// links in every user app ELF.
fn main() {
    // Re-run this script whenever user sources or built ELFs change.
    println!("cargo:rerun-if-changed=../ci-user/user/src/");
    println!("cargo:rerun-if-changed={}", TARGET_PATH);
    insert_app_data().unwrap();
}
/// Directory holding the freshly built user app ELF files (`.elf`).
static TARGET_PATH: &str = "../ci-user/user/build/elf/";

/// Generate `src/link_app.S`, embedding every user app ELF found in
/// `TARGET_PATH` into the kernel image: a `_num_app` count plus
/// start/end address table, an `_app_names` string table, and one
/// `.incbin` data section per app.
fn insert_app_data() -> Result<()> {
    let mut f = File::create("src/link_app.S")?;
    // Collect app names (extension stripped) from the build directory.
    // Reuse TARGET_PATH instead of repeating the path literal, so the
    // scanned directory and the .incbin path cannot drift apart.
    let mut apps: Vec<_> = read_dir(TARGET_PATH)
        .unwrap()
        .map(|dir_entry| {
            let mut name = dir_entry.unwrap().file_name().into_string().unwrap();
            // Drop the extension; tolerate extension-less names instead
            // of panicking on `find('.').unwrap()`.
            if let Some(dot) = name.find('.') {
                name.truncate(dot);
            }
            name
        })
        .collect();
    // Sort so app indices are deterministic across builds.
    apps.sort();
    writeln!(
        f,
        r#"
    .align 3
    .section .data
    .global _num_app
_num_app:
    .quad {}"#,
        apps.len()
    )?;
    for i in 0..apps.len() {
        writeln!(f, r#"    .quad app_{}_start"#, i)?;
    }
    // End marker of the last app (assumes at least one app was built).
    writeln!(f, r#"    .quad app_{}_end"#, apps.len() - 1)?;
    writeln!(
        f,
        r#"
    .global _app_names
_app_names:"#
    )?;
    for app in apps.iter() {
        writeln!(f, r#"    .string "{}""#, app)?;
    }
    for (idx, app) in apps.iter().enumerate() {
        println!("app_{}: {}", idx, app);
        writeln!(
            f,
            r#"
    .section .data
    .global app_{0}_start
    .global app_{0}_end
    .align 3
app_{0}_start:
    .incbin "{2}{1}.elf"
app_{0}_end:"#,
            idx, app, TARGET_PATH
        )?;
    }
    Ok(())
}

View File

@@ -1,82 +0,0 @@
use clap::{App, Arg};
use easy_fs::{BlockDevice, EasyFileSystem};
use std::fs::{read_dir, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Arc;
use std::sync::Mutex;
/// Bytes per easy-fs block.
const BLOCK_SZ: usize = 512;

/// A host file exposed as an easy-fs `BlockDevice`; the `Mutex`
/// serializes the seek + read/write pair on the shared `File` handle.
struct BlockFile(Mutex<File>);

impl BlockDevice for BlockFile {
    /// Read block `block_id` from the backing file into `buf`.
    /// Panics unless exactly one full block is read.
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
            .expect("Error when seeking!");
        assert_eq!(file.read(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
    }
    /// Write `buf` to block `block_id` of the backing file.
    /// Panics unless exactly one full block is written.
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
            .expect("Error when seeking!");
        assert_eq!(file.write(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
    }
}
fn main() {
easy_fs_pack().expect("Error when packing easy-fs!");
}
/// Pack every executable in the `--source` directory into a fresh
/// easy-fs image written to the `--output` path, then list the files
/// it contains.
fn easy_fs_pack() -> std::io::Result<()> {
    let matches = App::new("EasyFileSystem packer")
        .arg(
            Arg::with_name("source")
                .short("s")
                .long("source")
                .takes_value(true)
                .help("Executable source dir"),
        )
        .arg(
            Arg::with_name("output")
                .short("o")
                .long("output")
                .takes_value(true)
                .help("Output file path"),
        )
        .get_matches();
    let src_path = matches.value_of("source").unwrap();
    let output_path = matches.value_of("output").unwrap();
    println!("src_path = {}\noutput_path = {}", src_path, output_path);
    // Back the file system with a host file sized to exactly 16384 blocks.
    let block_file = Arc::new(BlockFile(Mutex::new({
        let f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(output_path)?;
        f.set_len(16384 * 512).unwrap();
        f
    })));
    // 16384 blocks x 512 B = 8 MiB image (the previous comment said 4 MiB).
    // NOTE(review): the "at most 4095 files" limit comes from the easy-fs
    // layout — confirm against the easy-fs crate.
    let efs = EasyFileSystem::create(block_file.clone(), 16384, 1);
    let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
    for dir_entry in read_dir(src_path).unwrap() {
        let dir_entry = dir_entry.unwrap();
        let path = dir_entry.path();
        // load app data from host file system
        let mut host_file = File::open(&path).unwrap();
        let mut all_data: Vec<u8> = Vec::new();
        host_file.read_to_end(&mut all_data).unwrap();
        // create a file in easy-fs, named after the host file stem
        let name = path.file_stem().unwrap().to_str().unwrap();
        let inode = root_inode.create(name).unwrap();
        // write data to easy-fs
        inode.write_at(0, all_data.as_slice());
    }
    // list apps
    for app in root_inode.ls() {
        println!("{}", app);
    }
    Ok(())
}

View File

@@ -1,82 +0,0 @@
use clap::{App, Arg};
use easy_fs::{BlockDevice, EasyFileSystem};
use std::fs::{read_dir, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Arc;
use std::sync::Mutex;
const BLOCK_SZ: usize = 512;
struct BlockFile(Mutex<File>);
impl BlockDevice for BlockFile {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
let mut file = self.0.lock().unwrap();
file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
.expect("Error when seeking!");
assert_eq!(file.read(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
let mut file = self.0.lock().unwrap();
file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
.expect("Error when seeking!");
assert_eq!(file.write(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
}
}
fn main() {
easy_fs_pack().expect("Error when packing easy-fs!");
}
/// Pack every executable in the `--source` directory into a fresh
/// easy-fs image written to the `--output` path, then list the files
/// it contains (ch7 variant: smaller 14000-block image).
fn easy_fs_pack() -> std::io::Result<()> {
    let matches = App::new("EasyFileSystem packer")
        .arg(
            Arg::with_name("source")
                .short("s")
                .long("source")
                .takes_value(true)
                .help("Executable source dir"),
        )
        .arg(
            Arg::with_name("output")
                .short("o")
                .long("output")
                .takes_value(true)
                .help("Output file path"),
        )
        .get_matches();
    let src_path = matches.value_of("source").unwrap();
    let output_path = matches.value_of("output").unwrap();
    println!("src_path = {}\noutput_path = {}", src_path, output_path);
    // Back the file system with a host file sized to exactly 14000 blocks.
    let block_file = Arc::new(BlockFile(Mutex::new({
        let f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(output_path)?;
        f.set_len(14000 * 512).unwrap();
        f
    })));
    // 14000 blocks x 512 B ≈ 6.8 MiB image (the previous comment said 4 MiB).
    // NOTE(review): the maximum file count comes from the easy-fs layout —
    // confirm against the easy-fs crate.
    let efs = EasyFileSystem::create(block_file.clone(), 14000, 1);
    let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
    for dir_entry in read_dir(src_path).unwrap() {
        let dir_entry = dir_entry.unwrap();
        let path = dir_entry.path();
        // load app data from host file system
        let mut host_file = File::open(&path).unwrap();
        let mut all_data: Vec<u8> = Vec::new();
        host_file.read_to_end(&mut all_data).unwrap();
        // create a file in easy-fs, named after the host file stem
        let name = path.file_stem().unwrap().to_str().unwrap();
        let inode = root_inode.create(name).unwrap();
        // write data to easy-fs
        inode.write_at(0, all_data.as_slice());
    }
    // list apps
    for app in root_inode.ls() {
        println!("{}", app);
    }
    Ok(())
}

View File

@@ -1,5 +0,0 @@
Cargo.lock
target/
bin/*.after
bin/*.before
bin/*.o

View File

@@ -1,51 +0,0 @@
language: rust
env:
- TARGET=x86_64-unknown-linux-gnu
- TARGET=riscv32imac-unknown-none-elf
- TARGET=riscv64imac-unknown-none-elf
- TARGET=riscv64gc-unknown-none-elf
rust:
- nightly
- stable
- 1.42.0 # MSRV
if: (branch = staging OR branch = trying OR branch = master) OR (type = pull_request AND branch = master)
matrix:
allow_failures:
- rust: nightly
include:
- env: CHECK_BLOBS=1
rust:
language: bash
if: (branch = staging OR branch = trying OR branch = master) OR (type = pull_request AND branch = master)
- env: RUSTFMT=1
rust: stable
if: (branch = staging OR branch = trying OR branch = master) OR (type = pull_request AND branch = master)
install:
- ci/install.sh
script:
- ci/script.sh
cache:
cargo: true
directories:
- gcc
branches:
only:
- master
- staging
- trying
notifications:
email:
on_success: never

View File

@@ -1,45 +0,0 @@
# Change Log
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
## [v0.6.0] - 2020-06-20
### Changed
- `Mtvec::trap_mode()`, `Stvec::trap_mode()` and `Utvec::trap_mode()` functions now return `Option<TrapMode>` (breaking change)
- Updated Minimum Supported Rust Version to 1.42.0
- Use `llvm_asm!` instead of `asm!`
### Removed
- vexriscv-specific registers were moved to the `vexriscv` crate
## [v0.5.6] - 2020-03-14
### Added
- Added vexriscv-specific registers
## [v0.5.5] - 2020-02-28
### Added
- Added `riscv32i-unknown-none-elf` target support
- Added user trap setup and handling registers
- Added write methods for the `mip` and `satp` registers
- Added `mideleg` register
- Added Changelog
### Changed
- Fixed MSRV by restricting the upper bound of `bare-metal` version
[Unreleased]: https://github.com/rust-embedded/riscv/compare/v0.6.0...HEAD
[v0.6.0]: https://github.com/rust-embedded/riscv/compare/v0.5.6...v0.6.0
[v0.5.6]: https://github.com/rust-embedded/riscv/compare/v0.5.5...v0.5.6
[v0.5.5]: https://github.com/rust-embedded/riscv/compare/v0.5.4...v0.5.5

View File

@@ -1,37 +0,0 @@
# The Rust Code of Conduct
## Conduct
**Contact**: [RISC-V team](https://github.com/rust-embedded/wg#the-riscv-team)
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
* Please be kind and courteous. There's no need to be mean or rude.
* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [RISC-V team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.
## Moderation
These are the policies for upholding our community's standards of conduct.
1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
3. Moderators will first respond to such remarks with a warning.
4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off.
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed.
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org).
*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).*
[team]: https://github.com/rust-embedded/wg#the-riscv-team

View File

@@ -1,21 +0,0 @@
[package]
name = "riscv"
version = "0.6.0"
repository = "https://github.com/rust-embedded/riscv"
authors = ["The RISC-V Team <risc-v@teams.rust-embedded.org>"]
categories = ["embedded", "hardware-support", "no-std"]
description = "Low level access to RISC-V processors"
keywords = ["riscv", "register", "peripheral"]
license = "ISC"
[dependencies]
bare-metal = "0.2.5"
bitflags = "1.0"
bit_field = "0.10.0"
log = "0.4"
[build-dependencies]
riscv-target = "0.1.2"
[features]
inline-asm = []

View File

@@ -1,41 +0,0 @@
[![crates.io](https://img.shields.io/crates/d/riscv.svg)](https://crates.io/crates/riscv)
[![crates.io](https://img.shields.io/crates/v/riscv.svg)](https://crates.io/crates/riscv)
[![Build Status](https://travis-ci.org/rust-embedded/riscv.svg?branch=master)](https://travis-ci.org/rust-embedded/riscv)
# `riscv`
> Low level access to RISC-V processors
This project is developed and maintained by the [RISC-V team][team].
## [Documentation](https://docs.rs/crate/riscv)
## Minimum Supported Rust Version (MSRV)
This crate is guaranteed to compile on stable Rust 1.42.0 and up. It *might*
compile with older versions but that may change in any new patch release.
## License
Copyright 2019-2020 [RISC-V team][team]
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
## Code of Conduct
Contribution to this crate is organized under the terms of the [Rust Code of
Conduct][CoC], the maintainer of this crate, the [RISC-V team][team], promises
to intervene to uphold that code of conduct.
[CoC]: CODE_OF_CONDUCT.md
[team]: https://github.com/rust-embedded/wg#the-riscv-team

View File

@@ -1,454 +0,0 @@
#include "asm.h"
// Software breakpoint: execute `ebreak`, then return to the caller.
.section .text.__ebreak
.global __ebreak
__ebreak:
    ebreak
    ret
// Wait-for-interrupt hint (`wfi`), used to idle the hart.
.section .text.__wfi
.global __wfi
__wfi:
    wfi
    ret
// Flush the entire TLB: `sfence.vma` with no operands covers all
// virtual addresses and all address spaces.
.section .text.__sfence_vma_all
.global __sfence_vma_all
__sfence_vma_all:
    sfence.vma
    ret
// Flush TLB entries for virtual address a0 in address space (ASID) a1.
.section .text.__sfence_vma
.global __sfence_vma
__sfence_vma:
    sfence.vma a0, a1
    ret
// RISC-V hypervisor instructions.
// The switch for enabling LLVM support for asm generation.
// #define LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
//
// Each stub below wraps one H-extension instruction. When the assembler
// cannot encode the mnemonic (switch above disabled), the raw 32-bit
// instruction encoding is emitted via .word instead.
// Conventions visible below: loads take/return through a0; stores take the
// address in a0 and the data in a1; fences take two operands in a0/a1.

// Invalidate G-stage (guest-physical) translations: hfence.gvma a0, a1.
.section .text.__hfence_gvma
.global __hfence_gvma
__hfence_gvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hfence.gvma a0, a1
#else
.word 1656029299
#endif
ret

// Invalidate VS-stage translations: hfence.vvma a0, a1.
.section .text.__hfence_vvma
.global __hfence_vvma
__hfence_vvma:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hfence.vvma a0, a1
#else
.word 582287475
#endif
ret

// Hypervisor virtual-machine load: byte, sign-extended.
.section .text.__hlv_b
.global __hlv_b
__hlv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.b a0, a0
#else
.word 1610958195
#endif
ret

// Hypervisor virtual-machine load: byte, zero-extended.
.section .text.__hlv_bu
.global __hlv_bu
__hlv_bu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.bu a0, a0
#else
.word 1612006771
#endif
ret

// Hypervisor virtual-machine load: halfword, sign-extended.
.section .text.__hlv_h
.global __hlv_h
__hlv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.h a0, a0
#else
.word 1678067059
#endif
ret

// Hypervisor virtual-machine load: halfword, zero-extended.
.section .text.__hlv_hu
.global __hlv_hu
__hlv_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.hu a0, a0
#else
.word 1679115635
#endif
ret

// Like hlv.hu, but checked against execute (X) permission instead of read.
.section .text.__hlvx_hu
.global __hlvx_hu
__hlvx_hu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlvx.hu a0, a0
#else
.word 1681212787
#endif
ret

// Hypervisor virtual-machine load: word, sign-extended.
.section .text.__hlv_w
.global __hlv_w
__hlv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.w a0, a0
#else
.word 1745175923
#endif
ret

// Like hlv.wu, but checked against execute (X) permission instead of read.
.section .text.__hlvx_wu
.global __hlvx_wu
__hlvx_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlvx.wu a0, a0
#else
.word 1748321651
#endif
ret

// Hypervisor virtual-machine store: byte.
.section .text.__hsv_b
.global __hsv_b
__hsv_b:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.b a0, a1
#else
.word 1656045683
#endif
ret

// Hypervisor virtual-machine store: halfword.
.section .text.__hsv_h
.global __hsv_h
__hsv_h:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.h a0, a1
#else
.word 1723154547
#endif
ret

// Hypervisor virtual-machine store: word.
.section .text.__hsv_w
.global __hsv_w
__hsv_w:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.w a0, a1
#else
.word 1790263411
#endif
ret

// Hypervisor virtual-machine load: word, zero-extended (RV64 only).
.section .text.__hlv_wu
.global __hlv_wu
__hlv_wu:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.wu a0, a0
#else
.word 1746224499
#endif
ret

// Hypervisor virtual-machine load: doubleword (RV64 only).
.section .text.__hlv_d
.global __hlv_d
__hlv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hlv.d a0, a0
#else
.word 1812284787
#endif
ret

// Hypervisor virtual-machine store: doubleword (RV64 only).
.section .text.__hsv_d
.global __hsv_d
__hsv_d:
#ifdef LLVM_RISCV_HYPERVISOR_EXTENSION_SUPPORT
hsv.d a0, a1
#else
.word 1857372275
#endif
ret
// CSR accessor table. Each RW(...) expands (via asm.h) to __read_/__write_/
// __set_/__clear_<name> functions; RO(...) expands to __read_<name> only.
// RW32/RO32 entries are emitted only when building for a 32-bit target.

// User Trap Setup
RW(0x000, ustatus) // User status register
RW(0x004, uie) // User interrupt-enable register
RW(0x005, utvec) // User trap handler base address
// User Trap Handling
RW(0x040, uscratch) // Scratch register for user trap handlers
RW(0x041, uepc) // User exception program counter
RW(0x042, ucause) // User trap cause
RW(0x043, utval) // User bad address or instruction
RW(0x044, uip) // User interrupt pending
// User Floating-Point CSRs
RW(0x001, fflags) // Floating-Point Accrued Exceptions
RW(0x002, frm) // Floating-Point Dynamic Rounding Mode
RW(0x003, fcsr) // Floating-Point Control and Status Register (frm + fflags)
// User Counter/Timers
RO( 0xC00, cycle) // Cycle counter for RDCYCLE instruction
RO( 0xC01, time) // Timer for RDTIME instruction
RO( 0xC02, instret) // Instructions-retired counter for RDINSTRET instruction
RO( 0xC03, hpmcounter3) // Performance-monitoring counter
RO( 0xC04, hpmcounter4) // Performance-monitoring counter
RO( 0xC05, hpmcounter5) // Performance-monitoring counter
RO( 0xC06, hpmcounter6) // Performance-monitoring counter
RO( 0xC07, hpmcounter7) // Performance-monitoring counter
RO( 0xC08, hpmcounter8) // Performance-monitoring counter
RO( 0xC09, hpmcounter9) // Performance-monitoring counter
RO( 0xC0A, hpmcounter10) // Performance-monitoring counter
RO( 0xC0B, hpmcounter11) // Performance-monitoring counter
RO( 0xC0C, hpmcounter12) // Performance-monitoring counter
RO( 0xC0D, hpmcounter13) // Performance-monitoring counter
RO( 0xC0E, hpmcounter14) // Performance-monitoring counter
RO( 0xC0F, hpmcounter15) // Performance-monitoring counter
RO( 0xC10, hpmcounter16) // Performance-monitoring counter
RO( 0xC11, hpmcounter17) // Performance-monitoring counter
RO( 0xC12, hpmcounter18) // Performance-monitoring counter
RO( 0xC13, hpmcounter19) // Performance-monitoring counter
RO( 0xC14, hpmcounter20) // Performance-monitoring counter
RO( 0xC15, hpmcounter21) // Performance-monitoring counter
RO( 0xC16, hpmcounter22) // Performance-monitoring counter
RO( 0xC17, hpmcounter23) // Performance-monitoring counter
RO( 0xC18, hpmcounter24) // Performance-monitoring counter
RO( 0xC19, hpmcounter25) // Performance-monitoring counter
RO( 0xC1A, hpmcounter26) // Performance-monitoring counter
RO( 0xC1B, hpmcounter27) // Performance-monitoring counter
RO( 0xC1C, hpmcounter28) // Performance-monitoring counter
RO( 0xC1D, hpmcounter29) // Performance-monitoring counter
RO( 0xC1E, hpmcounter30) // Performance-monitoring counter
RO( 0xC1F, hpmcounter31) // Performance-monitoring counter
RO32(0xC80, cycleh) // Upper 32 bits of cycle, RV32I only
RO32(0xC81, timeh) // Upper 32 bits of time, RV32I only
RO32(0xC82, instreth) // Upper 32 bits of instret, RV32I only
RO32(0xC83, hpmcounter3h) // Upper 32 bits of hpmcounter3, RV32I only
RO32(0xC84, hpmcounter4h)
RO32(0xC85, hpmcounter5h)
RO32(0xC86, hpmcounter6h)
RO32(0xC87, hpmcounter7h)
RO32(0xC88, hpmcounter8h)
RO32(0xC89, hpmcounter9h)
RO32(0xC8A, hpmcounter10h)
RO32(0xC8B, hpmcounter11h)
RO32(0xC8C, hpmcounter12h)
RO32(0xC8D, hpmcounter13h)
RO32(0xC8E, hpmcounter14h)
RO32(0xC8F, hpmcounter15h)
RO32(0xC90, hpmcounter16h)
RO32(0xC91, hpmcounter17h)
RO32(0xC92, hpmcounter18h)
RO32(0xC93, hpmcounter19h)
RO32(0xC94, hpmcounter20h)
RO32(0xC95, hpmcounter21h)
RO32(0xC96, hpmcounter22h)
RO32(0xC97, hpmcounter23h)
RO32(0xC98, hpmcounter24h)
RO32(0xC99, hpmcounter25h)
RO32(0xC9A, hpmcounter26h)
RO32(0xC9B, hpmcounter27h)
RO32(0xC9C, hpmcounter28h)
RO32(0xC9D, hpmcounter29h)
RO32(0xC9E, hpmcounter30h)
RO32(0xC9F, hpmcounter31h)
// Supervisor Trap Setup
RW(0x100, sstatus) // Supervisor status register
RW(0x102, sedeleg) // Supervisor exception delegation register
RW(0x103, sideleg) // Supervisor interrupt delegation register
RW(0x104, sie) // Supervisor interrupt-enable register
RW(0x105, stvec) // Supervisor trap handler base address
RW(0x106, scounteren) // Supervisor counter enable
// Supervisor Trap Handling
RW(0x140, sscratch) // Scratch register for supervisor trap handlers
RW(0x141, sepc) // Supervisor exception program counter
RW(0x142, scause) // Supervisor trap cause
RW(0x143, stval) // Supervisor bad address or instruction
RW(0x144, sip) // Supervisor interrupt pending
// Supervisor Protection and Translation
RW(0x180, satp) // Supervisor address translation and protection
// Machine Information Registers
RO(0xF11, mvendorid) // Vendor ID
RO(0xF12, marchid) // Architecture ID
RO(0xF13, mimpid) // Implementation ID
RO(0xF14, mhartid) // Hardware thread ID
// Machine Trap Setup
RW(0x300, mstatus) // Machine status register
RW(0x301, misa) // ISA and extensions
RW(0x302, medeleg) // Machine exception delegation register
RW(0x303, mideleg) // Machine interrupt delegation register
RW(0x304, mie) // Machine interrupt-enable register
RW(0x305, mtvec) // Machine trap handler base address
RW(0x306, mcounteren) // Machine counter enable
// Machine Trap Handling
RW(0x340, mscratch) // Scratch register for machine trap handlers
RW(0x341, mepc) // Machine exception program counter
RW(0x342, mcause) // Machine trap cause
RW(0x343, mtval) // Machine bad address or instruction
RW(0x344, mip) // Machine interrupt pending
// Machine Protection and Translation
RW( 0x3A0, pmpcfg0) // Physical memory protection configuration
RW32(0x3A1, pmpcfg1) // Physical memory protection configuration, RV32 only
RW( 0x3A2, pmpcfg2) // Physical memory protection configuration
RW32(0x3A3, pmpcfg3) // Physical memory protection configuration, RV32 only
RW( 0x3B0, pmpaddr0) // Physical memory protection address register
RW( 0x3B1, pmpaddr1) // Physical memory protection address register
RW( 0x3B2, pmpaddr2) // Physical memory protection address register
RW( 0x3B3, pmpaddr3) // Physical memory protection address register
RW( 0x3B4, pmpaddr4) // Physical memory protection address register
RW( 0x3B5, pmpaddr5) // Physical memory protection address register
RW( 0x3B6, pmpaddr6) // Physical memory protection address register
RW( 0x3B7, pmpaddr7) // Physical memory protection address register
RW( 0x3B8, pmpaddr8) // Physical memory protection address register
RW( 0x3B9, pmpaddr9) // Physical memory protection address register
RW( 0x3BA, pmpaddr10) // Physical memory protection address register
RW( 0x3BB, pmpaddr11) // Physical memory protection address register
RW( 0x3BC, pmpaddr12) // Physical memory protection address register
RW( 0x3BD, pmpaddr13) // Physical memory protection address register
RW( 0x3BE, pmpaddr14) // Physical memory protection address register
RW( 0x3BF, pmpaddr15) // Physical memory protection address register
// Machine Counter/Timers
RO( 0xB00, mcycle) // Machine cycle counter
RO( 0xB02, minstret) // Machine instructions-retired counter
RO( 0xB03, mhpmcounter3) // Machine performance-monitoring counter
RO( 0xB04, mhpmcounter4) // Machine performance-monitoring counter
RO( 0xB05, mhpmcounter5) // Machine performance-monitoring counter
RO( 0xB06, mhpmcounter6) // Machine performance-monitoring counter
RO( 0xB07, mhpmcounter7) // Machine performance-monitoring counter
RO( 0xB08, mhpmcounter8) // Machine performance-monitoring counter
RO( 0xB09, mhpmcounter9) // Machine performance-monitoring counter
RO( 0xB0A, mhpmcounter10) // Machine performance-monitoring counter
RO( 0xB0B, mhpmcounter11) // Machine performance-monitoring counter
RO( 0xB0C, mhpmcounter12) // Machine performance-monitoring counter
RO( 0xB0D, mhpmcounter13) // Machine performance-monitoring counter
RO( 0xB0E, mhpmcounter14) // Machine performance-monitoring counter
RO( 0xB0F, mhpmcounter15) // Machine performance-monitoring counter
RO( 0xB10, mhpmcounter16) // Machine performance-monitoring counter
RO( 0xB11, mhpmcounter17) // Machine performance-monitoring counter
RO( 0xB12, mhpmcounter18) // Machine performance-monitoring counter
RO( 0xB13, mhpmcounter19) // Machine performance-monitoring counter
RO( 0xB14, mhpmcounter20) // Machine performance-monitoring counter
RO( 0xB15, mhpmcounter21) // Machine performance-monitoring counter
RO( 0xB16, mhpmcounter22) // Machine performance-monitoring counter
RO( 0xB17, mhpmcounter23) // Machine performance-monitoring counter
RO( 0xB18, mhpmcounter24) // Machine performance-monitoring counter
RO( 0xB19, mhpmcounter25) // Machine performance-monitoring counter
RO( 0xB1A, mhpmcounter26) // Machine performance-monitoring counter
RO( 0xB1B, mhpmcounter27) // Machine performance-monitoring counter
RO( 0xB1C, mhpmcounter28) // Machine performance-monitoring counter
RO( 0xB1D, mhpmcounter29) // Machine performance-monitoring counter
RO( 0xB1E, mhpmcounter30) // Machine performance-monitoring counter
RO( 0xB1F, mhpmcounter31) // Machine performance-monitoring counter
RO32(0xB80, mcycleh) // Upper 32 bits of mcycle, RV32I only
RO32(0xB82, minstreth) // Upper 32 bits of minstret, RV32I only
RO32(0xB83, mhpmcounter3h) // Upper 32 bits of mhpmcounter3, RV32I only
RO32(0xB84, mhpmcounter4h)
RO32(0xB85, mhpmcounter5h)
RO32(0xB86, mhpmcounter6h)
RO32(0xB87, mhpmcounter7h)
RO32(0xB88, mhpmcounter8h)
RO32(0xB89, mhpmcounter9h)
RO32(0xB8A, mhpmcounter10h)
RO32(0xB8B, mhpmcounter11h)
RO32(0xB8C, mhpmcounter12h)
RO32(0xB8D, mhpmcounter13h)
RO32(0xB8E, mhpmcounter14h)
RO32(0xB8F, mhpmcounter15h)
RO32(0xB90, mhpmcounter16h)
RO32(0xB91, mhpmcounter17h)
RO32(0xB92, mhpmcounter18h)
RO32(0xB93, mhpmcounter19h)
RO32(0xB94, mhpmcounter20h)
RO32(0xB95, mhpmcounter21h)
RO32(0xB96, mhpmcounter22h)
RO32(0xB97, mhpmcounter23h)
RO32(0xB98, mhpmcounter24h)
RO32(0xB99, mhpmcounter25h)
RO32(0xB9A, mhpmcounter26h)
RO32(0xB9B, mhpmcounter27h)
RO32(0xB9C, mhpmcounter28h)
RO32(0xB9D, mhpmcounter29h)
RO32(0xB9E, mhpmcounter30h)
RO32(0xB9F, mhpmcounter31h)
RW(0x323, mhpmevent3) // Machine performance-monitoring event selector
RW(0x324, mhpmevent4) // Machine performance-monitoring event selector
RW(0x325, mhpmevent5) // Machine performance-monitoring event selector
RW(0x326, mhpmevent6) // Machine performance-monitoring event selector
RW(0x327, mhpmevent7) // Machine performance-monitoring event selector
RW(0x328, mhpmevent8) // Machine performance-monitoring event selector
RW(0x329, mhpmevent9) // Machine performance-monitoring event selector
RW(0x32A, mhpmevent10) // Machine performance-monitoring event selector
RW(0x32B, mhpmevent11) // Machine performance-monitoring event selector
RW(0x32C, mhpmevent12) // Machine performance-monitoring event selector
RW(0x32D, mhpmevent13) // Machine performance-monitoring event selector
RW(0x32E, mhpmevent14) // Machine performance-monitoring event selector
RW(0x32F, mhpmevent15) // Machine performance-monitoring event selector
RW(0x330, mhpmevent16) // Machine performance-monitoring event selector
RW(0x331, mhpmevent17) // Machine performance-monitoring event selector
RW(0x332, mhpmevent18) // Machine performance-monitoring event selector
RW(0x333, mhpmevent19) // Machine performance-monitoring event selector
RW(0x334, mhpmevent20) // Machine performance-monitoring event selector
RW(0x335, mhpmevent21) // Machine performance-monitoring event selector
RW(0x336, mhpmevent22) // Machine performance-monitoring event selector
RW(0x337, mhpmevent23) // Machine performance-monitoring event selector
RW(0x338, mhpmevent24) // Machine performance-monitoring event selector
RW(0x339, mhpmevent25) // Machine performance-monitoring event selector
RW(0x33A, mhpmevent26) // Machine performance-monitoring event selector
RW(0x33B, mhpmevent27) // Machine performance-monitoring event selector
RW(0x33C, mhpmevent28) // Machine performance-monitoring event selector
RW(0x33D, mhpmevent29) // Machine performance-monitoring event selector
RW(0x33E, mhpmevent30) // Machine performance-monitoring event selector
RW(0x33F, mhpmevent31) // Machine performance-monitoring event selector
// Debug/Trace Registers (shared with Debug Mode)
RW(0x7A0, tselect) // Debug/Trace trigger register select
RW(0x7A1, tdata1) // First Debug/Trace trigger data register
RW(0x7A2, tdata2) // Second Debug/Trace trigger data register
RW(0x7A3, tdata3) // Third Debug/Trace trigger data register
// Debug Mode Registers
RW(0x7B0, dcsr) // Debug control and status register
RW(0x7B1, dpc) // Debug PC
RW(0x7B2, dscratch) // Debug scratch register
// Hypervisor Trap Setup
RW(0x600, hstatus) // Hypervisor status register
RW(0x602, hedeleg) // Hypervisor exception delegation register
RW(0x603, hideleg) // Hypervisor interrupt delegation register
RW(0x604, hie) // Hypervisor interrupt-enable register
RW(0x606, hcounteren) // Hypervisor counter enable
RW(0x607, hgeie) // Hypervisor guest external interrupt-enable register
// Hypervisor Trap Handling
RW(0x643, htval) // Hypervisor bad guest physical address
RW(0x644, hip) // Hypervisor interrupt pending
RW(0x645, hvip) // Hypervisor virtual interrupt pending
RW(0x64a, htinst) // Hypervisor trap instruction (transformed)
RW(0xe12, hgeip) // Hypervisor guest external interrupt pending
// Hypervisor Protection and Translation
// hgatp is a read/write CSR (privileged spec); it was previously declared
// RO here, which never emits __write_/__set_/__clear_hgatp even though the
// generated hgatp module references those symbols.
RW(0x680, hgatp) // Hypervisor guest address translation and protection
// Debug/Trace Registers
RW(0x6a8, hcontext) // Hypervisor-mode context register
// Hypervisor Counter/Timer Virtualization Registers
RW(0x605, htimedelta) // Delta for VS/VU-mode timer
RW32(0x615, htimedeltah) // Upper 32 bits of {\tt htimedelta}, RV32 only
// Virtual Supervisor Registers
RW(0x200, vsstatus) // Virtual supervisor status register
RW(0x204, vsie) // Virtual supervisor interrupt-enable register
RW(0x205, vstvec) // Virtual supervisor trap handler base address
RW(0x240, vsscratch) // Virtual supervisor scratch register
RW(0x241, vsepc) // Virtual supervisor exception program counter
RW(0x242, vscause) // Virtual supervisor trap cause
RW(0x243, vstval) // Virtual supervisor bad address or instruction
RW(0x244, vsip) // Virtual supervisor interrupt pending
RW(0x280, vsatp) // Virtual supervisor address translation and protection

View File

@@ -1,48 +0,0 @@
#ifndef __ASM_H
#define __ASM_H

/*
 * Macros that stamp out tiny CSR accessor functions, one per CSR, each in
 * its own .text.* section so the linker can garbage-collect unused ones.
 * All accessors follow the C ABI: a0 carries the value in/out.
 */

/* __read_<name>: return the CSR value (csrrs with rs=x0 is a pure read). */
#define REG_READ(name, offset) \
.section .text.__read_ ## name; \
.global __read_ ## name; \
__read_ ## name: \
csrrs a0, offset, x0; \
ret

/* __write_<name>: write a0 to the CSR, discarding the old value (rd=x0). */
#define REG_WRITE(name, offset) \
.section .text.__write_ ## name; \
.global __write_ ## name; \
__write_ ## name: \
csrrw x0, offset, a0; \
ret

/* __set_<name>: set the bits of a0 in the CSR (read-modify-write OR). */
#define REG_SET(name, offset) \
.section .text.__set_ ## name; \
.global __set_ ## name; \
__set_ ## name: \
csrrs x0, offset, a0; \
ret

/* __clear_<name>: clear the bits of a0 in the CSR (read-modify-write AND-NOT). */
#define REG_CLEAR(name, offset) \
.section .text.__clear_ ## name; \
.global __clear_ ## name; \
__clear_ ## name: \
csrrc x0, offset, a0; \
ret

/* Composite helpers used by the CSR table in asm.S. */
#define REG_READ_WRITE(name, offset) REG_READ(name, offset); REG_WRITE(name, offset)
#define REG_SET_CLEAR(name, offset) REG_SET(name, offset); REG_CLEAR(name, offset)
#define RW(offset, name) REG_READ_WRITE(name, offset); REG_SET_CLEAR(name, offset)
#define RO(offset, name) REG_READ(name, offset)

/* RW32/RO32 entries exist only when assembling for a 32-bit target. */
#if __riscv_xlen == 32
#define RW32(offset, name) RW(offset, name)
#define RO32(offset, name) RO(offset, name)
#else
#define RW32(offset, name)
#define RO32(offset, name)
#endif
#endif /* __ASM_H */

View File

@@ -1,20 +0,0 @@
# Windows counterpart of assemble.sh: pre-assembles asm.S into one static
# library ("blob") per supported RISC-V target, placed under bin/.
# Requires the riscv64-unknown-elf GNU toolchain on PATH.
New-Item -Force -Name bin -Type Directory
# remove existing blobs because otherwise this will append object files to the old blobs
Remove-Item -Force bin/*.a
$crate = "riscv"
# Assemble asm.S once per (ABI, arch) pair and archive the object into the
# blob named after the corresponding Rust target triple.
riscv64-unknown-elf-gcc -c -mabi=ilp32 -march=rv32i asm.S -o bin/$crate.o
riscv64-unknown-elf-ar crs bin/riscv32i-unknown-none-elf.a bin/$crate.o
riscv64-unknown-elf-gcc -c -mabi=ilp32 -march=rv32ic asm.S -o bin/$crate.o
riscv64-unknown-elf-ar crs bin/riscv32ic-unknown-none-elf.a bin/$crate.o
riscv64-unknown-elf-gcc -c -mabi=lp64 -march=rv64i asm.S -o bin/$crate.o
riscv64-unknown-elf-ar crs bin/riscv64i-unknown-none-elf.a bin/$crate.o
riscv64-unknown-elf-gcc -c -mabi=lp64 -march=rv64ic asm.S -o bin/$crate.o
riscv64-unknown-elf-ar crs bin/riscv64ic-unknown-none-elf.a bin/$crate.o
# Drop the intermediate object file; only the .a blobs are kept.
Remove-Item bin/$crate.o

View File

@@ -1,22 +0,0 @@
#!/bin/bash
# Pre-assembles asm.S into one static library ("blob") per supported RISC-V
# target, placed under bin/. build.rs links the matching blob when the
# inline-asm feature is disabled. Requires riscv64-unknown-elf-gcc on PATH.
set -euxo pipefail

crate=riscv

# remove existing blobs because otherwise this will append object files to the old blobs
rm -f bin/*.a

# Assemble once per (ABI, arch) pair and archive the object into the blob
# named after the corresponding Rust target triple.
riscv64-unknown-elf-gcc -c -mabi=ilp32 -march=rv32i asm.S -o bin/$crate.o
ar crs bin/riscv32i-unknown-none-elf.a bin/$crate.o

riscv64-unknown-elf-gcc -c -mabi=ilp32 -march=rv32ic asm.S -o bin/$crate.o
ar crs bin/riscv32ic-unknown-none-elf.a bin/$crate.o

riscv64-unknown-elf-gcc -c -mabi=lp64 -march=rv64i asm.S -o bin/$crate.o
ar crs bin/riscv64i-unknown-none-elf.a bin/$crate.o

riscv64-unknown-elf-gcc -c -mabi=lp64 -march=rv64ic asm.S -o bin/$crate.o
ar crs bin/riscv64ic-unknown-none-elf.a bin/$crate.o

# Drop the intermediate object file; only the .a blobs are kept.
rm bin/$crate.o

View File

@@ -1,35 +0,0 @@
extern crate riscv_target;
use riscv_target::Target;
use std::path::PathBuf;
use std::{env, fs};
/// Cargo build script: on RISC-V targets built without the `inline-asm`
/// feature, copies the matching prebuilt assembly blob into OUT_DIR and
/// instructs Cargo to link it; also exports `riscv`/`riscv32`/`riscv64`
/// cfg flags for conditional compilation.
fn main() {
    let target = env::var("TARGET").unwrap();
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    let name = env::var("CARGO_PKG_NAME").unwrap();

    let wants_blob =
        target.starts_with("riscv") && env::var_os("CARGO_FEATURE_INLINE_ASM").is_none();
    if wants_blob {
        // Strip the target down to the "i"/"c" extensions so it matches one
        // of the blob names produced by assemble.sh (e.g. riscv64ic-...).
        let mut blob_target = Target::from_target_str(&target);
        blob_target.retain_extensions("ic");
        let blob_target = blob_target.to_string();
        let dest = out_dir.join(format!("lib{}.a", name));
        fs::copy(format!("bin/{}.a", blob_target), dest).unwrap();
        println!("cargo:rustc-link-lib=static={}", name);
        println!("cargo:rustc-link-search={}", out_dir.display());
    }

    // Width-specific cfg flag, plus the umbrella `riscv` flag for either.
    let width = if target.contains("riscv32") {
        Some("riscv32")
    } else if target.contains("riscv64") {
        Some("riscv64")
    } else {
        None
    };
    if let Some(width) = width {
        println!("cargo:rustc-cfg=riscv");
        println!("cargo:rustc-cfg={}", width);
    }
}

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Checks that the blobs are up to date with the committed assembly files
set -euxo pipefail

# Disassemble every committed blob to a .before snapshot.
for lib in $(ls bin/*.a); do
filename=$(basename $lib)
riscv64-unknown-elf-objdump -Cd $lib > bin/${filename%.a}.before
done

# Rebuild the blobs from the current asm.S.
./assemble.sh

# Disassemble the freshly built blobs to .after snapshots.
for lib in $(ls bin/*.a); do
filename=$(basename $lib)
riscv64-unknown-elf-objdump -Cd $lib > bin/${filename%.a}.after
done

# Fail (diff exits non-zero under `set -e`) if any disassembly changed.
for cksum in $(ls bin/*.after); do
diff -u $cksum ${cksum%.after}.before
done

View File

@@ -1,8 +0,0 @@
#!/bin/bash
# Regenerates ../src/register/hypervisorx64/ from the *.txt CSR descriptor
# files in this directory, using generator.rs: one Rust module per
# descriptor, plus a mod.rs re-exporting them all.
rustc generator.rs
rm -f ../src/register/hypervisorx64/mod.rs;
for i in *.txt; do
# Feed each descriptor to the generator and append its `pub mod` line.
./generator <$i > ../src/register/hypervisorx64/`basename -s .txt $i`.rs;
echo "pub mod $(basename -s .txt $i);" >> ../src/register/hypervisorx64/mod.rs;
done
# Remove the compiled generator binary; it is a build artifact.
rm -f generator

View File

@@ -1,312 +0,0 @@
use std::fmt::*;
// Polyfill for the (then-unstable) `Split::as_str`: given a `split` iterator
// `$x` over the original string `$r`, returns the not-yet-consumed tail of
// `$r` as a `&str`. It clones the iterator, peeks the next fragment, and uses
// the fragment's byte offset inside `$r` to split off the remainder.
// Returns "" when the iterator is exhausted.
macro_rules! as_str_polyfill {
    ($x: expr, $r: expr) => {{
        let mut y = $x.clone();
        if let Some(x) = y.next() {
            // Fragments of a `split` borrow from `$r`, so pointer arithmetic
            // yields the fragment's starting offset within `$r`.
            $r.split_at(x.as_ptr() as usize - $r.as_ptr() as usize).1
        } else {
            ""
        }
    }};
}
/// A parsed `Name[=value];Name2;...` enumeration list for a CSR bit field.
#[derive(Debug, Clone)]
struct EnumerationDescriptor<'a> {
    // (variant name, numeric value) pairs in declaration order.
    enumerations: Vec<(&'a str, usize)>,
}
impl<'a> EnumerationDescriptor<'a> {
    /// Parses a `;`-separated variant list. Each entry is `Name` or
    /// `Name=value`; an explicit value resets the running counter, which
    /// otherwise advances by one per variant (C-enum style numbering).
    pub fn parse(enums: &'a str) -> Self {
        let mut counter = 0;
        let list = enums.split(";");
        let mut e = Vec::new();
        for tup in list {
            let mut t = tup.split("=");
            let n = t.next().unwrap();
            if let Some(new_id) = t.next() {
                counter = new_id.parse().unwrap();
            }
            e.push((n, counter));
            counter += 1;
        }
        EnumerationDescriptor { enumerations: e }
    }
    /// Renders a `#[repr(usize)]` Rust enum named `name`, plus a `from`
    /// helper mapping a raw value back to its variant (the generated code
    /// hits `unreachable!()` for values not in the list).
    fn generate_enum(&self, name: &str) -> String {
        let mut ret = String::new();
        write!(
            &mut ret,
            "#[derive(Copy, Clone, Debug)]
#[repr(usize)]
"
        )
        .unwrap();
        write!(&mut ret, "pub enum {}{{\n", name).unwrap();
        let mut branches = String::new();
        // Emit each variant and, in parallel, its `value => Self::Variant`
        // match arm for the generated `from` helper.
        for e in self.enumerations.iter() {
            write!(&mut ret, " {} = {},\n", e.0, e.1).unwrap();
            write!(&mut branches, " {} => Self::{},\n", e.1, e.0).unwrap();
        }
        write!(
            &mut ret,
            "}}
impl {}{{
fn from(x: usize)->Self{{
match x{{
{} _ => unreachable!()
}}
}}
}}
",
            name, branches
        )
        .unwrap();
        return ret;
    }
}
/// One bit field of a CSR: an inclusive `[lo, hi]` bit range, a
/// human-readable description, and an optional enumeration giving the
/// field's named values.
#[derive(Debug, Clone)]
struct BitFieldDescriptor<'a> {
    name: &'a str,
    description: &'a str,
    lo: usize, // lowest bit index (inclusive)
    hi: usize, // highest bit index (inclusive)
    // Some((enum type name, variants)) when the field is enum-typed,
    // None for plain numeric ("number") fields.
    ed: Option<(&'a str, EnumerationDescriptor<'a>)>,
}
impl<'a> BitFieldDescriptor<'a> {
    /// Parses one descriptor line of the form
    /// `name,hi,lo,<EnumType|number>[,variants],description...`.
    /// The bit bounds may appear in either order, and the description is the
    /// untouched remainder of the line (it may itself contain commas).
    pub fn parse(desc: &'a str) -> Self {
        let mut parts = desc.split(",");
        let name = parts.next().unwrap();
        let hi = parts.next().unwrap().parse::<usize>().unwrap();
        let lo = parts.next().unwrap().parse::<usize>().unwrap();
        // Normalize so that lo <= hi regardless of the order written.
        let (lo, hi) = if lo < hi { (lo, hi) } else { (hi, lo) };
        let use_enum = parts.next().unwrap();
        let ed = if use_enum != "number" {
            // Enum-typed field: next part is the `;`-separated variant list.
            let opts = parts.next().unwrap();
            Some((use_enum, EnumerationDescriptor::parse(opts)))
        } else {
            None
        };
        // Everything not yet consumed (commas included) is the description.
        let description = as_str_polyfill!(parts, desc);
        BitFieldDescriptor {
            name,
            lo,
            hi,
            description,
            ed,
        }
    }
    /// Emits the Rust enum definition for an enum-typed field; `None` for
    /// plain numeric fields.
    pub fn generate_enum(&self) -> Option<String> {
        if let Some((n, e)) = &self.ed {
            Some(e.generate_enum(n))
        } else {
            None
        }
    }
    /// Rust type of the field accessor: the enum name when enum-typed,
    /// `bool` for a single bit, `usize` otherwise.
    pub fn flag_type(&self) -> &str {
        if let Some((n, _)) = self.ed {
            n
        } else {
            if self.lo == self.hi {
                "bool"
            } else {
                "usize"
            }
        }
    }
    /// Unshifted mask covering the field width.
    /// NOTE(review): not referenced by the generator at present.
    fn mask(&self) -> String {
        format!("{}", (1usize << (self.hi - self.lo + 1)) - 1)
    }
    /// Expression extracting this field from `self.bits` in generated code.
    fn getter(&self) -> String {
        if self.lo == self.hi {
            return format!("self.bits.get_bit({})", self.lo);
        } else if self.flag_type() != "usize" {
            // Enum-typed: convert the raw bits via the generated `from`.
            return format!(
                "{}::from(self.bits.get_bits({}..{}))",
                self.flag_type(),
                self.lo,
                self.hi + 1
            );
        } else {
            return format!("self.bits.get_bits({}..{})", self.lo, self.hi + 1);
        }
    }
    /// Statement storing `val` into this field of `self.bits` in generated code.
    fn setter(&self) -> String {
        if self.lo == self.hi {
            return format!("self.bits.set_bit({}, val);", self.lo);
        } else if self.flag_type() != "usize" {
            return format!(
                "self.bits.set_bits({}..{}, val as usize);",
                self.lo,
                self.hi + 1
            );
        } else {
            return format!("self.bits.set_bits({}..{}, val);", self.lo, self.hi + 1);
        }
    }
    /// Emits the `name()` getter and `set_name()` setter methods for the field.
    fn generate_read_write(&self) -> String {
        format!(
            " /// {}
#[inline]
pub fn {}(&self)->{}{{
{}
}}
#[inline]
pub fn set_{}(&mut self, val: {}){{
{}
}}\n",
            self.description,
            self.name,
            self.flag_type(),
            self.getter(),
            self.name,
            self.flag_type(),
            self.setter()
        )
    }
    /// Emits free-standing set/clear helpers for a single-bit field.
    /// Fixed: `set_*` now uses `csrrs` (atomic read-and-set) and `clear_*`
    /// uses `csrrc` (atomic read-and-clear); the two mnemonics were swapped.
    /// NOTE(review): the only call site is commented out in
    /// `CSRDescriptor::generate`, so this is currently dead code.
    fn generate_bit_set(&self) -> String {
        format!(
            " pub fn set_{}()->bool{{
unsafe {{csr::csrrs({}) & {} !=0}}
}}
pub fn clear_{}()->bool{{
unsafe {{csr::csrrc({}) & {} !=0 }}
}}\n",
            self.name,
            1usize << self.lo,
            1usize << self.lo,
            self.name,
            1usize << self.lo,
            1usize << self.lo
        )
    }
    /// Emits a `set_clear_csr!` invocation for a single-bit field.
    fn generate_bitops(&self) -> String {
        format!(
            " set_clear_csr!(
///{}
, set_{}, clear_{}, 1 << {});\n",
            self.description, self.name, self.name, self.lo
        )
    }
}
/// A parsed CSR descriptor file: register name, numeric CSR address (`id`),
/// trailing description text, and the list of bit fields.
#[derive(Debug, Clone)]
struct CSRDescriptor<'a> {
    name: &'a str,
    id: usize, // CSR address, written in decimal in the descriptor file
    description: &'a str,
    bfs: Vec<BitFieldDescriptor<'a>>,
}
impl<'a> CSRDescriptor<'a> {
    /// Lower-cased register name; matches the `__read_*`/`__write_*` symbol
    /// suffixes produced by asm.S.
    fn canonical_name(&self) -> String {
        self.name.to_lowercase()
    }
    /// Parses a whole descriptor file: line 1 is the type name, line 2 the
    /// decimal CSR address, then one bit-field line per field up to a
    /// literal `end` line; everything after `end` is the description.
    pub fn parse(d: &'a str) -> Self {
        let mut parts = d.split("\n");
        let name = parts.next().unwrap();
        let id = parts.next().unwrap().parse::<usize>().unwrap();
        let mut bfs = Vec::new();
        while let Some(x) = parts.next() {
            if x == "end" {
                break;
            } else {
                bfs.push(BitFieldDescriptor::parse(x));
            }
        }
        CSRDescriptor {
            name,
            id,
            // Remainder of the file after `end` (may span several lines).
            description: as_str_polyfill!(parts, d),
            bfs,
        }
    }
    /// Renders the full Rust module source for this CSR.
    pub fn generate(&self) -> String {
        let mut trait_impls = String::new();
        let mut bit_sets = String::new();
        let mut enums = String::new();
        for bf in self.bfs.iter() {
            // Single-bit fields additionally get set_*/clear_* bit macros.
            if bf.lo == bf.hi {
                write!(&mut bit_sets, "{}", bf.generate_bitops()).unwrap();
                //write!(&mut trait_impls, "{}",bf.generate_bit_set()).unwrap();
            }
            write!(&mut trait_impls, "{}", bf.generate_read_write()).unwrap();
            if let Some(x) = bf.generate_enum() {
                write!(&mut enums, "{}", x).unwrap();
            }
        }
        // A descriptor with no fields at all exposes the CSR as a raw usize;
        // otherwise a typed wrapper struct with per-field accessors is emitted.
        if &trait_impls == "" && &bit_sets == "" {
            format!(
                "
//! {}
read_csr_as_usize!({}, __read_{});
write_csr_as_usize!({}, __write_{});
",
                self.description,
                self.id,
                self.canonical_name(),
                self.id,
                self.canonical_name()
            )
        } else {
            format!(
                "
//! {}
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct {}{{\n    bits: usize,\n}}
impl {}{{
#[inline]
pub fn bits(&self) -> usize{{
return self.bits;
}}
#[inline]
pub fn from_bits(x: usize) -> Self{{
return {}{{bits: x}};
}}
#[inline]
pub unsafe fn write(&self){{
_write(self.bits);
}}
{}
}}
read_csr_as!({}, {}, __read_{});
write_csr!({}, __write_{});
set!({}, __set_{});
clear!({}, __clear_{});
// bit ops
{}
// enums
{}
",
                self.description,
                self.name,
                self.name,
                self.name,
                trait_impls,
                self.name,
                self.id,
                self.canonical_name(),
                self.id,
                self.canonical_name(),
                self.id,
                self.canonical_name(),
                self.id,
                self.canonical_name(),
                bit_sets,
                enums,
            )
        }
    }
}
/// Entry point: reads one CSR descriptor file from stdin and prints the
/// generated Rust module source to stdout.
fn main() {
    use std::io::Read;
    let mut buffer = String::new();
    std::io::stdin().read_to_string(&mut buffer).unwrap();
    println!("{}", CSRDescriptor::parse(&buffer).generate());
}

View File

@@ -1,36 +0,0 @@
Hcounteren
1542
cy,0,0,number,
tm,1,1,number,
ir,2,2,number,
hpm3,3,3,number,
hpm4,4,4,number,
hpm5,5,5,number,
hpm6,6,6,number,
hpm7,7,7,number,
hpm8,8,8,number,
hpm9,9,9,number,
hpm10,10,10,number,
hpm11,11,11,number,
hpm12,12,12,number,
hpm13,13,13,number,
hpm14,14,14,number,
hpm15,15,15,number,
hpm16,16,16,number,
hpm17,17,17,number,
hpm18,18,18,number,
hpm19,19,19,number,
hpm20,20,20,number,
hpm21,21,21,number,
hpm22,22,22,number,
hpm23,23,23,number,
hpm24,24,24,number,
hpm25,25,25,number,
hpm26,26,26,number,
hpm27,27,27,number,
hpm28,28,28,number,
hpm29,29,29,number,
hpm30,30,30,number,
hpm31,31,31,number,
end
Hypervisor Counter Enable Register.

View File

@@ -1,16 +0,0 @@
Hedeleg
1538
ex0,0,0,number,Instruction address misaligned
ex1,1,1,number,Instruction access fault
ex2,2,2,number,Illegal instruction
ex3,3,3,number,Breakpoint
ex4,4,4,number,Load address misaligned
ex5,5,5,number,Load access fault
ex6,6,6,number,Store/AMO address misaligned
ex7,7,7,number,Store/AMO access fault
ex8,8,8,number,Environment call from U-mode or VU-mode
ex12,12,12,number,Instruction page fault
ex13,13,13,number,Load page fault
ex15,15,15,number,Store/AMO page fault
end
Hypervisor Exception Delegation Register.

View File

@@ -1,7 +0,0 @@
Hgatp
1664
mode,63,60,HgatpValues,Bare=0;Sv39x4=8;Sv48x4=9,Guest address translation mode.
vmid,57,44,number,Virtual machine ID.
ppn,43,0,number,Physical Page Number for root page table.
end
Hypervisor Guest Address Translation and Protection Register.

View File

@@ -1,4 +0,0 @@
Hgeie
1543
end
Hypervisor Guest External Interrupt Enable Register.

View File

@@ -1,4 +0,0 @@
Hgeip
3602
end
Hypervisor Guest External Interrupt Pending Register.

View File

@@ -1,7 +0,0 @@
Hideleg
1539
sip,2,2,number,Software Interrupt
tip,6,6,number,Timer Interrupt
eip,10,10,number,External Interrupt
end
Hypervisor Interrupt Delegation Register.

View File

@@ -1,8 +0,0 @@
Hie
1540
vssie,2,2,number,Software Interrupt
vstie,6,6,number,Timer Interrupt
vseie,10,10,number,External Interrupt
sgeie,12,12,number,Guest External Interrupt
end
Hypervisor Interrupt Enable Register.

View File

@@ -1,8 +0,0 @@
Hip
1604
vssip,2,2,number,Software Interrupt
vstip,6,6,number,Timer Interrupt
vseip,10,10,number,External Interrupt
sgeip,12,12,number,Guest External Interrupt
end
Hypervisor Interrupt Pending Register.

View File

@@ -1,14 +0,0 @@
Hstatus
1536
vsxl,33,32,VsxlValues,Vsxl32=1;Vsxl64;Vsxl128,Effective XLEN for VM.
vtsr,22,22,number,TSR for VM.
vtw,21,21,number,TW for VM.
vtvm,20,20,number,TVM for VM.
vgein,17,12,number,Virtual Guest External Interrupt Number.
hu,9,9,number,Hypervisor User mode.
spvp,8,8,number,Supervisor Previous Virtual Privilege.
spv,7,7,number,Supervisor Previous Virtualization mode.
gva,6,6,number,Guest Virtual Address.
vsbe,5,5,number,VS access endianness.
end
HStatus Register.

View File

@@ -1,5 +0,0 @@
Htimedelta
1541
end
Hypervisor Time Delta Register.
read_composite_csr!(super::htimedeltah::read(), read());

View File

@@ -1,4 +0,0 @@
Htimedeltah
1557
end
Hypervisor Time Delta Register.

View File

@@ -1,4 +0,0 @@
Htinst
1610
end
Hypervisor Trap Instruction Register.

View File

@@ -1,4 +0,0 @@
Htval
1603
end
Hypervisor Trap Value Register.

View File

@@ -1,7 +0,0 @@
Hvip
1605
vssip,2,2,number,Software Interrupt
vstip,6,6,number,Timer Interrupt
vseip,10,10,number,External Interrupt
end
Hypervisor Virtual Interrupt Pending Register.

View File

@@ -1,7 +0,0 @@
Vsatp
640
mode,63,60,HgatpValues,Bare=0;Sv39x4=8;Sv48x4=9,Guest address translation mode.
asid,59,44,number,ASID.
ppn,43,0,number,Physical Page Number for root page table.
end
Virtual Supervisor Guest Address Translation and Protection Register.

View File

@@ -1,6 +0,0 @@
Vscause
578
interrupt,63,63,number,Is cause interrupt.
code,62,0,number,Exception code
end
Virtual Supervisor Cause Register.

View File

@@ -1,4 +0,0 @@
Vsepc
577
end
Virtual Supervisor Exception Program Counter.

View File

@@ -1,7 +0,0 @@
Vsie
516
ssie,1,1,number,Software Interrupt
stie,5,5,number,Timer Interrupt
seie,9,9,number,External Interrupt
end
Virtual Supervisor Interrupt Enable Register.

View File

@@ -1,7 +0,0 @@
Vsip
580
ssip,1,1,number,Software Interrupt
stip,5,5,number,Timer Interrupt
seip,9,9,number,External Interrupt
end
Virtual Supervisor Interrupt Pending Register.

View File

@@ -1,4 +0,0 @@
Vsscratch
576
end
Virtual Supervisor Scratch Register.

View File

@@ -1,14 +0,0 @@
Vsstatus
512
sd,63,60,number,
uxl,33,32,UxlValues,Uxl32=1;Uxl64;Uxl128,Effective User XLEN.
mxr,19,19,number,
sum,18,18,number,
xs,16,15,number,
fs,14,13,number,
spp,8,8,number,
ube,6,6,number,
spie,5,5,number,
sie,1,1,number,
end
Virtual Supervisor Status Register.

View File

@@ -1,4 +0,0 @@
Vstval
579
end
Virtual Supervisor Trap Value Register.

View File

@@ -1,6 +0,0 @@
Vstvec
517
base,63,2,number,
mode,1,0,number,
end
Virtual Supervisor Trap Vector Base Address Register.

View File

@@ -1,211 +0,0 @@
use super::*;
use bit_field::BitField;
use core::convert::TryInto;
/// Guest-physical address under the Sv32x4 two-level translation scheme,
/// stored as a raw 64-bit value (valid addresses use only bits 0..34).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct GPAddrSv32X4(u64);
impl Address for GPAddrSv32X4 {
fn new(addr: usize) -> Self {
Self::new_u64(addr as u64)
}
fn as_usize(&self) -> usize {
self.0 as usize
}
fn page_number(&self) -> usize {
self.0.get_bits(12..34) as usize
}
fn page_offset(&self) -> usize {
self.0.get_bits(0..12) as usize
}
fn to_4k_aligned(&self) -> Self {
GPAddrSv32X4((self.0 >> 12) << 12)
}
}
impl VirtualAddress for GPAddrSv32X4 {
    /// Reinterprets the raw address as a mutable reference to `T`.
    /// SAFETY: the caller must ensure the address is mapped and accessible
    /// in the current context, properly aligned for `T`, and not aliased —
    /// nothing here checks any of that, and the returned lifetime `'b` is
    /// unconstrained.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl AddressL2 for GPAddrSv32X4 {
    /// VPN[1]: bits 22..34 — 12 bits (Sv32x4 widens the root index by 2 bits).
    fn p2_index(&self) -> usize {
        self.0.get_bits(22..34) as usize
    }
    /// VPN[0]: bits 12..22 — 10 bits.
    fn p1_index(&self) -> usize {
        self.0.get_bits(12..22) as usize
    }
    /// Reassembles an address from page-table indices and a page offset,
    /// asserting that each component fits its field.
    fn from_page_table_indices(p2_index: usize, p1_index: usize, offset: usize) -> Self {
        let p2_index = p2_index as u64;
        let p1_index = p1_index as u64;
        let offset = offset as u64;
        assert!(p2_index.get_bits(12..) == 0, "p2_index exceeding 12 bits");
        assert!(p1_index.get_bits(10..) == 0, "p1_index exceeding 10 bits");
        assert!(offset.get_bits(12..) == 0, "offset exceeding 12 bits");
        GPAddrSv32X4::new_u64((p2_index << 22) | (p1_index << 12) | offset)
    }
}
impl AddressX64 for GPAddrSv32X4 {
fn new_u64(addr: u64) -> Self {
assert!(
addr.get_bits(34..64) == 0,
"Sv32x4 does not allow pa 34..64!=0"
);
GPAddrSv32X4(addr)
}
fn as_u64(&self) -> u64 {
self.0
}
}
/// A guest-physical address under Sv39x4 (hypervisor G-stage translation).
///
/// Sv39x4 widens the Sv39 root-table index by 2 bits, giving 41-bit
/// guest-physical addresses; bits 41..64 must be zero (checked in `new_u64`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct GPAddrSv39X4(u64);
impl Address for GPAddrSv39X4 {
    fn new(addr: usize) -> Self {
        // Route through `new_u64` so the 41..64 == 0 invariant is enforced,
        // consistent with `GPAddrSv32X4::new`. (Previously this constructor
        // only did `try_into().unwrap()` and skipped the range check.)
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0 as usize
    }
    /// Guest-physical page number: bits 12..41.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..41) as usize
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        GPAddrSv39X4((self.0 >> 12) << 12)
    }
}
impl VirtualAddress for GPAddrSv39X4 {
    /// # Safety
    /// The address must point to a valid, live `T` in the current mapping;
    /// the caller chooses the returned lifetime.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl AddressL3 for GPAddrSv39X4 {
    /// Level-3 (root) index: bits 30..41 — 11 bits because the x4 scheme
    /// quadruples the root page table.
    fn p3_index(&self) -> usize {
        self.0.get_bits(30..41) as usize
    }
    /// Level-2 index: bits 21..30.
    fn p2_index(&self) -> usize {
        self.0.get_bits(21..30) as usize
    }
    /// Level-1 index: bits 12..21.
    fn p1_index(&self) -> usize {
        self.0.get_bits(12..21) as usize
    }
    fn from_page_table_indices(
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self {
        let p3_index = p3_index as u64;
        let p2_index = p2_index as u64;
        let p1_index = p1_index as u64;
        let offset = offset as u64;
        assert!(p3_index.get_bits(11..) == 0, "p3_index exceeding 11 bits");
        assert!(p2_index.get_bits(9..) == 0, "p2_index exceeding 9 bits");
        assert!(p1_index.get_bits(9..) == 0, "p1_index exceeding 9 bits");
        assert!(offset.get_bits(12..) == 0, "offset exceeding 12 bits");
        // Field positions: p3 at bit 30, p2 at bit 21, p1 at bit 12
        // (same value as the original's chained `<< 12 << 9 << 9` shifts).
        GPAddrSv39X4::new_u64((p3_index << 30) | (p2_index << 21) | (p1_index << 12) | offset)
    }
}
impl AddressX64 for GPAddrSv39X4 {
    fn new_u64(addr: u64) -> Self {
        assert!(
            addr.get_bits(41..64) == 0,
            "Sv39x4 does not allow pa 41..64!=0"
        );
        GPAddrSv39X4(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
/// A guest-physical address under Sv48x4 (hypervisor G-stage translation).
///
/// Sv48x4 widens the Sv48 root-table index by 2 bits, giving 50-bit
/// guest-physical addresses; bits 50..64 must be zero (checked in `new_u64`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct GPAddrSv48X4(u64);
impl Address for GPAddrSv48X4 {
    fn new(addr: usize) -> Self {
        // Route through `new_u64` so the 50..64 == 0 invariant is enforced,
        // consistent with `GPAddrSv32X4::new`. (Previously this constructor
        // only did `try_into().unwrap()` and skipped the range check.)
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0 as usize
    }
    /// Guest-physical page number: bits 12..50.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..50) as usize
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        GPAddrSv48X4((self.0 >> 12) << 12)
    }
}
impl VirtualAddress for GPAddrSv48X4 {
    /// # Safety
    /// The address must point to a valid, live `T` in the current mapping;
    /// the caller chooses the returned lifetime.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl AddressL4 for GPAddrSv48X4 {
    /// Level-4 (root) index: bits 39..50 — 11 bits because the x4 scheme
    /// quadruples the root page table.
    fn p4_index(&self) -> usize {
        self.0.get_bits(39..50) as usize
    }
    /// Level-3 index: bits 30..39.
    fn p3_index(&self) -> usize {
        self.0.get_bits(30..39) as usize
    }
    /// Level-2 index: bits 21..30.
    fn p2_index(&self) -> usize {
        self.0.get_bits(21..30) as usize
    }
    /// Level-1 index: bits 12..21.
    fn p1_index(&self) -> usize {
        self.0.get_bits(12..21) as usize
    }
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self {
        let p4_index = p4_index as u64;
        let p3_index = p3_index as u64;
        let p2_index = p2_index as u64;
        let p1_index = p1_index as u64;
        let offset = offset as u64;
        assert!(p4_index.get_bits(11..) == 0, "p4_index exceeding 11 bits");
        assert!(p3_index.get_bits(9..) == 0, "p3_index exceeding 9 bits");
        assert!(p2_index.get_bits(9..) == 0, "p2_index exceeding 9 bits");
        assert!(p1_index.get_bits(9..) == 0, "p1_index exceeding 9 bits");
        assert!(offset.get_bits(12..) == 0, "offset exceeding 12 bits");
        // Field positions: p4 at bit 39, p3 at 30, p2 at 21, p1 at 12
        // (same values as the original's chained `<< 12 << 9 ...` shifts).
        GPAddrSv48X4::new_u64(
            (p4_index << 39) | (p3_index << 30) | (p2_index << 21) | (p1_index << 12) | offset,
        )
    }
}
impl AddressX64 for GPAddrSv48X4 {
    fn new_u64(addr: u64) -> Self {
        assert!(
            addr.get_bits(50..64) == 0,
            "Sv48x4 does not allow pa 50..64!=0"
        );
        GPAddrSv48X4(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}

View File

@@ -1,98 +0,0 @@
/// Common behaviour of every paged address type (virtual or physical).
pub trait Address: core::fmt::Debug + Copy + Clone + PartialEq + Eq + PartialOrd + Ord {
    /// Build an address from a native-width integer (impls may panic on
    /// out-of-range values).
    fn new(addr: usize) -> Self;
    /// The page number: the address with the 12-bit page offset stripped.
    fn page_number(&self) -> usize;
    /// The offset within the 4 KiB page (bits 0..12).
    fn page_offset(&self) -> usize;
    /// The address rounded down to its 4 KiB page boundary.
    fn to_4k_aligned(&self) -> Self;
    /// The raw value as `usize` (impls may panic on narrowing).
    fn as_usize(&self) -> usize;
}
/// An address that can be dereferenced in the current address space.
pub trait VirtualAddress: Address {
    /// # Safety
    /// The address must point to a valid, live `T`; the caller chooses the
    /// returned lifetime `'b`.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T;
}
/// Raw 32-bit value accessors.
pub trait AddressX32: Address {
    fn new_u32(addr: u32) -> Self;
    fn as_u32(&self) -> u32;
}
/// Raw 64-bit value accessors.
pub trait AddressX64: Address {
    fn new_u64(addr: u64) -> Self;
    fn as_u64(&self) -> u64;
}
/// Marker trait for physical addresses (64-bit backed in this crate).
pub trait PhysicalAddress: AddressX64 {}
/// An address translated by a three-level page table (e.g. Sv39).
pub trait AddressL3: Address {
    fn p3_index(&self) -> usize;
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Assemble an address from its table indices plus page offset.
    fn from_page_table_indices(
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self;
}
/// An address translated by a four-level page table (e.g. Sv48).
pub trait AddressL4: Address {
    fn p4_index(&self) -> usize;
    fn p3_index(&self) -> usize;
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Assemble an address from its table indices plus page offset.
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self;
}
/// An address translated by a two-level page table (e.g. Sv32).
pub trait AddressL2: Address {
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Assemble an address from its table indices plus page offset.
    fn from_page_table_indices(p2_index: usize, p1_index: usize, offset: usize) -> Self;
}
pub mod gpax4;
pub mod page;
pub mod sv32;
pub mod sv39;
pub mod sv48;
pub use self::gpax4::*;
pub use self::page::*;
pub use self::sv32::*;
pub use self::sv39::*;
pub use self::sv48::*;
/// Alias the crate-level `VirtAddr`/`PhysAddr`/`Page`/`Frame` types to the
/// Sv32 address scheme.
#[macro_export]
macro_rules! use_sv32 {
    () => {
        pub type VirtAddr = VirtAddrSv32;
        pub type PhysAddr = PhysAddrSv32;
        pub type Page = PageWith<VirtAddr>;
        pub type Frame = FrameWith<PhysAddr>;
    };
}
/// Alias the crate-level `VirtAddr`/`PhysAddr`/`Page`/`Frame` types to the
/// Sv39 address scheme.
#[macro_export]
macro_rules! use_sv39 {
    () => {
        pub type VirtAddr = VirtAddrSv39;
        pub type PhysAddr = PhysAddrSv39;
        pub type Page = PageWith<VirtAddr>;
        pub type Frame = FrameWith<PhysAddr>;
    };
}
/// Alias the crate-level `VirtAddr`/`PhysAddr`/`Page`/`Frame` types to the
/// Sv48 address scheme.
#[macro_export]
macro_rules! use_sv48 {
    () => {
        pub type VirtAddr = VirtAddrSv48;
        pub type PhysAddr = PhysAddrSv48;
        pub type Page = PageWith<VirtAddr>;
        pub type Frame = FrameWith<PhysAddr>;
    };
}
// Default scheme per target: Sv48 on riscv64, Sv32 on riscv32.
#[cfg(target_arch = "riscv64")]
use_sv48!();
#[cfg(target_arch = "riscv32")]
use_sv32!();

View File

@@ -1,174 +0,0 @@
pub use super::*;
pub use bit_field::BitField;
/// Index accessors for pages/frames under a four-level translation scheme.
pub trait PageWithL4 {
    fn p4_index(&self) -> usize;
    fn p3_index(&self) -> usize;
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Build the page whose address has the given table indices (offset 0).
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
    ) -> Self;
}
/// Index accessors for pages/frames under a three-level translation scheme.
pub trait PageWithL3 {
    fn p3_index(&self) -> usize;
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Build the page whose address has the given table indices (offset 0).
    fn from_page_table_indices(p3_index: usize, p2_index: usize, p1_index: usize) -> Self;
}
/// Index accessors for pages/frames under a two-level translation scheme.
pub trait PageWithL2 {
    fn p2_index(&self) -> usize;
    fn p1_index(&self) -> usize;
    /// Build the page whose address has the given table indices (offset 0).
    fn from_page_table_indices(p2_index: usize, p1_index: usize) -> Self;
}
/// A 4 KiB virtual page, parameterized over the address scheme `T`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct PageWith<T: VirtualAddress>(T);
impl<T: AddressL4 + VirtualAddress> PageWithL4 for PageWith<T> {
    // Index accessors simply delegate to the underlying address type.
    fn p4_index(&self) -> usize {
        self.0.p4_index()
    }
    fn p3_index(&self) -> usize {
        self.0.p3_index()
    }
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
    ) -> Self {
        // Offset 0: a page starts exactly at the indices' address.
        PageWith::of_addr(T::from_page_table_indices(
            p4_index, p3_index, p2_index, p1_index, 0,
        ))
    }
}
impl<T: AddressL3 + VirtualAddress> PageWithL3 for PageWith<T> {
    fn p3_index(&self) -> usize {
        self.0.p3_index()
    }
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(p3_index: usize, p2_index: usize, p1_index: usize) -> Self {
        PageWith::of_addr(T::from_page_table_indices(p3_index, p2_index, p1_index, 0))
    }
}
impl<T: AddressL2 + VirtualAddress> PageWithL2 for PageWith<T> {
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(p2_index: usize, p1_index: usize) -> Self {
        PageWith::of_addr(T::from_page_table_indices(p2_index, p1_index, 0))
    }
}
impl<T: VirtualAddress> PageWith<T> {
    /// The page containing `addr` (rounded down to a 4 KiB boundary).
    pub fn of_addr(addr: T) -> Self {
        PageWith(addr.to_4k_aligned())
    }
    /// The page with the given virtual page number.
    pub fn of_vpn(vpn: usize) -> Self {
        PageWith(T::new(vpn << 12))
    }
    /// First address of the page. `Address` requires `Copy`, so this is a
    /// plain copy — the original's `self.0.clone()` was redundant
    /// (clippy: `clone_on_copy`).
    pub fn start_address(&self) -> T {
        self.0
    }
    /// Virtual page number of this page.
    pub fn number(&self) -> usize {
        self.0.page_number()
    }
}
/// A 4 KiB physical frame, parameterized over the physical address scheme `T`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct FrameWith<T: PhysicalAddress>(T);
impl<T: AddressL4 + PhysicalAddress> PageWithL4 for FrameWith<T> {
    // Index accessors simply delegate to the underlying address type.
    fn p4_index(&self) -> usize {
        self.0.p4_index()
    }
    fn p3_index(&self) -> usize {
        self.0.p3_index()
    }
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
    ) -> Self {
        // Offset 0: a frame starts exactly at the indices' address.
        FrameWith::of_addr(T::from_page_table_indices(
            p4_index, p3_index, p2_index, p1_index, 0,
        ))
    }
}
impl<T: AddressL3 + PhysicalAddress> PageWithL3 for FrameWith<T> {
    fn p3_index(&self) -> usize {
        self.0.p3_index()
    }
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(p3_index: usize, p2_index: usize, p1_index: usize) -> Self {
        FrameWith::of_addr(T::from_page_table_indices(p3_index, p2_index, p1_index, 0))
    }
}
impl<T: AddressL2 + PhysicalAddress> PageWithL2 for FrameWith<T> {
    fn p2_index(&self) -> usize {
        self.0.p2_index()
    }
    fn p1_index(&self) -> usize {
        self.0.p1_index()
    }
    fn from_page_table_indices(p2_index: usize, p1_index: usize) -> Self {
        FrameWith::of_addr(T::from_page_table_indices(p2_index, p1_index, 0))
    }
}
impl<T: PhysicalAddress> FrameWith<T> {
    /// The frame containing `addr` (rounded down to a 4 KiB boundary).
    pub fn of_addr(addr: T) -> Self {
        FrameWith(addr.to_4k_aligned())
    }
    /// The frame with the given physical page number.
    #[inline(always)]
    pub fn of_ppn(ppn: usize) -> Self {
        FrameWith(T::new_u64((ppn as u64) << 12))
    }
    /// First address of the frame. `Address` requires `Copy`, so this is a
    /// plain copy — the original's `self.0.clone()` was redundant
    /// (clippy: `clone_on_copy`).
    pub fn start_address(&self) -> T {
        self.0
    }
    /// Physical page number of this frame.
    pub fn number(&self) -> usize {
        self.0.page_number()
    }
    /// Reinterpret the frame's contents as `&mut U` through a linear mapping
    /// (kernel VA = PA + `linear_offset`).
    ///
    /// # Safety
    /// `linear_offset` must be the active linear-map offset, the mapped
    /// memory must hold a valid `U`, and the caller chooses the returned
    /// lifetime `'b`.
    pub unsafe fn as_kernel_mut<'a, 'b, U>(&'a self, linear_offset: u64) -> &'b mut U {
        &mut *(((self.0).as_u64() + linear_offset) as *mut U)
    }
}

View File

@@ -1,91 +0,0 @@
use super::*;
use bit_field::BitField;
use core::convert::TryInto;
/// An Sv32 virtual address: 32 bits, translated by a two-level page table.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtAddrSv32(u32);
impl Address for VirtAddrSv32 {
    fn new(addr: usize) -> Self {
        // Panics if `addr` does not fit in 32 bits.
        VirtAddrSv32(addr.try_into().unwrap())
    }
    fn as_usize(&self) -> usize {
        self.0 as usize
    }
    /// Virtual page number: everything above the 12-bit page offset.
    fn page_number(&self) -> usize {
        (self.0 >> 12) as usize
    }
    /// Offset within the 4 KiB page.
    fn page_offset(&self) -> usize {
        (self.0 & 0xfff) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        // Clear the offset bits to land on the page boundary.
        VirtAddrSv32(self.0 & !0xfff)
    }
}
impl VirtualAddress for VirtAddrSv32 {
    /// # Safety
    /// The address must point to a valid, live `T` in the current mapping.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl AddressL2 for VirtAddrSv32 {
    /// Level-2 (root) index: bits 22..32.
    fn p2_index(&self) -> usize {
        (self.0 >> 22) as usize
    }
    /// Level-1 index: bits 12..22.
    fn p1_index(&self) -> usize {
        ((self.0 >> 12) & 0x3ff) as usize
    }
    fn from_page_table_indices(p2_index: usize, p1_index: usize, offset: usize) -> Self {
        assert!(p2_index >> 10 == 0, "p2_index exceeding 10 bits");
        assert!(p1_index >> 10 == 0, "p1_index exceeding 10 bits");
        assert!(offset >> 12 == 0, "offset exceeding 12 bits");
        let raw = (p2_index << 22) | (p1_index << 12) | offset;
        VirtAddrSv32::new(raw)
    }
}
impl AddressX32 for VirtAddrSv32 {
    fn new_u32(addr: u32) -> Self {
        VirtAddrSv32(addr)
    }
    fn as_u32(&self) -> u32 {
        self.0
    }
}
/// An Sv32 physical address: up to 34 bits, backed by a `u64`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct PhysAddrSv32(u64);
impl Address for PhysAddrSv32 {
    fn new(addr: usize) -> Self {
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        // A 34-bit PA cannot round-trip through a 32-bit usize; refuse to
        // silently drop the top bits.
        assert!(
            self.0.get_bits(32..34) == 0,
            "Downcasting an Sv32 pa >4GB (32..34!=0) will cause address loss."
        );
        self.0 as usize
    }
    /// Physical page number: bits 12..34.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..34) as usize
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        PhysAddrSv32((self.0 >> 12) << 12)
    }
}
impl AddressX64 for PhysAddrSv32 {
    fn new_u64(addr: u64) -> Self {
        assert!(
            addr.get_bits(34..64) == 0,
            "Sv32 does not allow pa 34..64!=0"
        );
        PhysAddrSv32(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
impl PhysicalAddress for PhysAddrSv32 {}

View File

@@ -1,115 +0,0 @@
use super::*;
use bit_field::BitField;
use core::convert::TryInto;
/// An Sv39 virtual address: 39 significant bits; bits 39..64 must be a
/// sign-extension of bit 38 (canonical form, enforced in `new_u64`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtAddrSv39(u64);
impl VirtualAddress for VirtAddrSv39 {
    /// # Safety
    /// The address must point to a valid, live `T` in the current mapping.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl Address for VirtAddrSv39 {
    fn new(addr: usize) -> Self {
        // Routed through `new_u64`, which checks canonical sign-extension.
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0.try_into().unwrap()
    }
    /// Virtual page number: bits 12..39.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..39).try_into().unwrap()
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        VirtAddrSv39((self.0 >> 12) << 12)
    }
}
impl AddressL3 for VirtAddrSv39 {
    /// Level-3 (root) index: bits 30..39.
    fn p3_index(&self) -> usize {
        self.0.get_bits(30..39) as usize
    }
    /// Level-2 index: bits 21..30.
    fn p2_index(&self) -> usize {
        self.0.get_bits(21..30) as usize
    }
    /// Level-1 index: bits 12..21.
    fn p1_index(&self) -> usize {
        self.0.get_bits(12..21) as usize
    }
    fn from_page_table_indices(
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self {
        let p3_index = p3_index as u64;
        let p2_index = p2_index as u64;
        let p1_index = p1_index as u64;
        let offset = offset as u64;
        // NOTE(review): this assert permits 11 bits, but Sv39's p3 index is
        // only 9 bits (30..39) — looks copied from the Sv39x4 variant. Out-of
        // -range values still panic, just in `new_u64`'s sext check below
        // with a misleading message; confirm intent.
        assert!(p3_index.get_bits(11..) == 0, "p3_index exceeding 11 bits");
        assert!(p2_index.get_bits(9..) == 0, "p2_index exceeding 9 bits");
        assert!(p1_index.get_bits(9..) == 0, "p1_index exceeding 9 bits");
        assert!(offset.get_bits(12..) == 0, "offset exceeding 12 bits");
        let mut addr =
            (p3_index << 12 << 9 << 9) | (p2_index << 12 << 9) | (p1_index << 12) | offset;
        // Canonicalize: replicate bit 38 into bits 39..64.
        if addr.get_bit(38) {
            addr.set_bits(39..64, (1 << (64 - 39)) - 1);
        } else {
            addr.set_bits(39..64, 0x0000);
        }
        VirtAddrSv39::new_u64(addr)
    }
}
/// An Sv39 physical address: up to 56 bits.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct PhysAddrSv39(u64);
impl Address for PhysAddrSv39 {
    fn new(addr: usize) -> Self {
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0.try_into().unwrap()
    }
    /// Physical page number: bits 12..56.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..56) as usize
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        PhysAddrSv39((self.0 >> 12) << 12)
    }
}
impl AddressX64 for VirtAddrSv39 {
    /// Construct from a raw 64-bit value, checking that bits 39..64 are a
    /// correct sign-extension of bit 38 (canonical Sv39 form).
    fn new_u64(addr: u64) -> Self {
        if addr.get_bit(38) {
            assert!(
                addr.get_bits(39..64) == (1 << (64 - 39)) - 1,
                "va 39..64 is not sext"
            );
        } else {
            assert!(addr.get_bits(39..64) == 0x0000, "va 39..64 is not sext");
        }
        // `addr` is already a u64 — the original's `addr as u64` was a no-op
        // cast (clippy: unnecessary_cast).
        VirtAddrSv39(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
impl AddressX64 for PhysAddrSv39 {
    fn new_u64(addr: u64) -> Self {
        // Sv39 physical addresses are at most 56 bits.
        assert!(
            addr.get_bits(56..64) == 0,
            "Sv39 does not allow pa 56..64!=0"
        );
        PhysAddrSv39(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
impl PhysicalAddress for PhysAddrSv39 {}

View File

@@ -1,125 +0,0 @@
use super::*;
use bit_field::BitField;
use core::convert::TryInto;
/// An Sv48 virtual address: 48 significant bits; bits 48..64 must be a
/// sign-extension of bit 47 (canonical form, enforced in `new_u64`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtAddrSv48(u64);
impl VirtualAddress for VirtAddrSv48 {
    /// # Safety
    /// The address must point to a valid, live `T` in the current mapping.
    unsafe fn as_mut<'a, 'b, T>(&'a self) -> &'b mut T {
        &mut *(self.0 as *mut T)
    }
}
impl Address for VirtAddrSv48 {
    fn new(addr: usize) -> Self {
        // Routed through `new_u64`, which checks canonical sign-extension.
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0.try_into().unwrap()
    }
    /// Virtual page number: bits 12..48.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..48).try_into().unwrap()
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        VirtAddrSv48((self.0 >> 12) << 12)
    }
}
impl AddressL4 for VirtAddrSv48 {
    /// Level-4 (root) index: bits 39..48.
    fn p4_index(&self) -> usize {
        self.0.get_bits(39..48) as usize
    }
    /// Level-3 index: bits 30..39.
    fn p3_index(&self) -> usize {
        self.0.get_bits(30..39) as usize
    }
    /// Level-2 index: bits 21..30.
    fn p2_index(&self) -> usize {
        self.0.get_bits(21..30) as usize
    }
    /// Level-1 index: bits 12..21.
    fn p1_index(&self) -> usize {
        self.0.get_bits(12..21) as usize
    }
    fn from_page_table_indices(
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        p1_index: usize,
        offset: usize,
    ) -> Self {
        let p4_index = p4_index as u64;
        let p3_index = p3_index as u64;
        let p2_index = p2_index as u64;
        let p1_index = p1_index as u64;
        let offset = offset as u64;
        assert!(p4_index.get_bits(9..) == 0, "p4_index exceeding 9 bits");
        assert!(p3_index.get_bits(9..) == 0, "p3_index exceeding 9 bits");
        assert!(p2_index.get_bits(9..) == 0, "p2_index exceeding 9 bits");
        assert!(p1_index.get_bits(9..) == 0, "p1_index exceeding 9 bits");
        assert!(offset.get_bits(12..) == 0, "offset exceeding 12 bits");
        let mut addr = (p4_index << 12 << 9 << 9 << 9)
            | (p3_index << 12 << 9 << 9)
            | (p2_index << 12 << 9)
            | (p1_index << 12)
            | offset;
        // Canonicalize: replicate bit 47 into bits 48..64.
        if addr.get_bit(47) {
            addr.set_bits(48..64, (1 << (64 - 48)) - 1);
        } else {
            addr.set_bits(48..64, 0x0000);
        }
        VirtAddrSv48::new_u64(addr)
    }
}
/// An Sv48 physical address: up to 56 bits.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct PhysAddrSv48(u64);
impl Address for PhysAddrSv48 {
    fn new(addr: usize) -> Self {
        Self::new_u64(addr as u64)
    }
    fn as_usize(&self) -> usize {
        self.0.try_into().unwrap()
    }
    /// Physical page number: bits 12..56.
    fn page_number(&self) -> usize {
        self.0.get_bits(12..56) as usize
    }
    /// Offset within the 4 KiB page: bits 0..12.
    fn page_offset(&self) -> usize {
        self.0.get_bits(0..12) as usize
    }
    fn to_4k_aligned(&self) -> Self {
        PhysAddrSv48((self.0 >> 12) << 12)
    }
}
impl AddressX64 for VirtAddrSv48 {
    /// Construct from a raw 64-bit value, checking that bits 48..64 are a
    /// correct sign-extension of bit 47 (canonical Sv48 form).
    fn new_u64(addr: u64) -> Self {
        if addr.get_bit(47) {
            assert!(
                addr.get_bits(48..64) == (1 << (64 - 48)) - 1,
                "va 48..64 is not sext"
            );
        } else {
            assert!(addr.get_bits(48..64) == 0x0000, "va 48..64 is not sext");
        }
        // `addr` is already a u64 — the original's `addr as u64` was a no-op
        // cast (clippy: unnecessary_cast).
        VirtAddrSv48(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
impl AddressX64 for PhysAddrSv48 {
    fn new_u64(addr: u64) -> Self {
        // Sv48 physical addresses are at most 56 bits.
        assert!(
            addr.get_bits(56..64) == 0,
            "Sv48 does not allow pa 56..64!=0"
        );
        PhysAddrSv48(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}
impl PhysicalAddress for PhysAddrSv48 {}

View File

@@ -1,152 +0,0 @@
//! Assembly instructions
// Emit a zero-argument instruction wrapper: inline asm on RISC-V with the
// `inline-asm` feature, an external assembly shim without it, and
// `unimplemented!()` on non-RISC-V targets.
macro_rules! instruction {
    ($(#[$attr:meta])*, $fnname:ident, $asm:expr, $asm_fn:ident) => (
        $(#[$attr])*
        #[inline]
        pub unsafe fn $fnname() {
            match () {
                #[cfg(all(riscv, feature = "inline-asm"))]
                () => core::arch::asm!($asm),
                #[cfg(all(riscv, not(feature = "inline-asm")))]
                () => {
                    extern "C" {
                        fn $asm_fn();
                    }
                    $asm_fn();
                }
                #[cfg(not(riscv))]
                () => unimplemented!(),
            }
        }
    )
}
instruction!(
    /// `EBREAK` instruction wrapper
    ///
    /// Generates a breakpoint exception.
    , ebreak, "ebreak", __ebreak);
instruction!(
    /// `WFI` instruction wrapper
    ///
    /// Provides a hint to the implementation that the current hart can be stalled until an interrupt might need servicing.
    /// The WFI instruction is just a hint, and a legal implementation is to implement WFI as a NOP.
    , wfi, "wfi", __wfi);
instruction!(
    /// `SFENCE.VMA` instruction wrapper (all address spaces and page table levels)
    ///
    /// Synchronizes updates to in-memory memory-management data structures with current execution.
    /// Instruction execution causes implicit reads and writes to these data structures; however, these implicit references
    /// are ordinarily not ordered with respect to loads and stores in the instruction stream.
    /// Executing an `SFENCE.VMA` instruction guarantees that any stores in the instruction stream prior to the
    /// `SFENCE.VMA` are ordered before all implicit references subsequent to the `SFENCE.VMA`.
    , sfence_vma_all, "sfence.vma", __sfence_vma_all);
/// `SFENCE.VMA` instruction wrapper
///
/// Synchronizes updates to in-memory memory-management data structures with current execution.
/// Instruction execution causes implicit reads and writes to these data structures; however, these implicit references
/// are ordinarily not ordered with respect to loads and stores in the instruction stream.
/// Executing an `SFENCE.VMA` instruction guarantees that any stores in the instruction stream prior to the
/// `SFENCE.VMA` are ordered before all implicit references subsequent to the `SFENCE.VMA`.
#[inline]
#[allow(unused_variables)]
pub unsafe fn sfence_vma(asid: usize, addr: usize) {
    match () {
        #[cfg(all(riscv, feature = "inline-asm"))]
        () => core::arch::asm!("sfence.vma {0}, {1}", in(reg) asid, in(reg) addr),
        #[cfg(all(riscv, not(feature = "inline-asm")))]
        () => {
            extern "C" {
                fn __sfence_vma(asid: usize, addr: usize);
            }
            __sfence_vma(asid, addr);
        }
        #[cfg(not(riscv))]
        () => unimplemented!(),
    }
}
mod hypervisor_extension {
    // Generating instructions for Hypervisor extension.
    // There are two kinds of instructions: rs1/rs2 type and rs1/rd type.
    // Also special register handling is required before LLVM could generate inline assembly for extended instructions.
    //
    // The instructions are emitted as raw `.word` opcodes with operands
    // pinned to a0/a1 (x10/x11) to match the fixed encodings below.
    // NOTE(review): spot-checked 1656029299 == 0x62B50073 == `hfence.gvma
    // a0, a1`; the remaining opcode words are assumed to encode the named
    // instructions with the same operand convention — verify against the
    // RISC-V hypervisor-extension spec.
    macro_rules! instruction_hypervisor_extension {
        (RS1_RS2, $(#[$attr:meta])*, $fnname:ident, $asm:expr, $asm_fn:ident) => (
            $(#[$attr])*
            #[inline]
            #[allow(unused_variables)]
            pub unsafe fn $fnname(rs1: usize, rs2: usize) {
                match () {
                    #[cfg(all(riscv, feature = "inline-asm"))]
                    // Since LLVM does not recognize the two registers, we assume they are placed in a0 and a1, correspondingly.
                    () => core::arch::asm!($asm, in("x10") rs1, in("x11") rs2),
                    #[cfg(all(riscv, not(feature = "inline-asm")))]
                    () => {
                        extern "C" {
                            fn $asm_fn(rs1: usize, rs2: usize);
                        }
                        $asm_fn(rs1, rs2);
                    }
                    #[cfg(not(riscv))]
                    () => unimplemented!(),
                }
            }
        );
        (RS1_RD, $(#[$attr:meta])*, $fnname:ident, $asm:expr, $asm_fn:ident) => (
            $(#[$attr])*
            #[inline]
            #[allow(unused_variables)]
            pub unsafe fn $fnname(rs1: usize)->usize {
                match () {
                    #[cfg(all(riscv, feature = "inline-asm"))]
                    () => {
                        let mut result : usize;
                        core::arch::asm!($asm, inlateout("x10") rs1 => result);
                        return result;
                    }
                    #[cfg(all(riscv, not(feature = "inline-asm")))]
                    () => {
                        extern "C" {
                            fn $asm_fn(rs1: usize)->usize;
                        }
                        return $asm_fn(rs1);
                    }
                    #[cfg(not(riscv))]
                    () => unimplemented!(),
                }
            }
        )
    }
    instruction_hypervisor_extension!(RS1_RS2,,hfence_gvma,".word 1656029299",__hfence_gvma);
    instruction_hypervisor_extension!(RS1_RS2,,hfence_vvma,".word 582287475",__hfence_vvma);
    instruction_hypervisor_extension!(RS1_RD,,hlv_b,".word 1610958195",__hlv_b);
    instruction_hypervisor_extension!(RS1_RD,,hlv_bu,".word 1612006771",__hlv_bu);
    instruction_hypervisor_extension!(RS1_RD,,hlv_h,".word 1678067059",__hlv_h);
    instruction_hypervisor_extension!(RS1_RD,,hlv_hu,".word 1679115635",__hlv_hu);
    instruction_hypervisor_extension!(RS1_RD,,hlvx_hu,".word 1681212787",__hlvx_hu);
    instruction_hypervisor_extension!(RS1_RD,,hlv_w,".word 1745175923",__hlv_w);
    instruction_hypervisor_extension!(RS1_RD,,hlvx_wu,".word 1748321651",__hlvx_wu);
    instruction_hypervisor_extension!(RS1_RS2,,hsv_b,".word 1656045683",__hsv_b);
    instruction_hypervisor_extension!(RS1_RS2,,hsv_h,".word 1723154547",__hsv_h);
    instruction_hypervisor_extension!(RS1_RS2,,hsv_w,".word 1790263411",__hsv_w);
    instruction_hypervisor_extension!(RS1_RD,,hlv_wu,".word 1746224499",__hlv_wu);
    instruction_hypervisor_extension!(RS1_RD,,hlv_d,".word 1812284787",__hlv_d);
    instruction_hypervisor_extension!(RS1_RS2,,hsv_d,".word 1857372275",__hsv_d);
}
pub use self::hypervisor_extension::*;

View File

@@ -1,58 +0,0 @@
//! Interrupts
// NOTE: Adapted from cortex-m/src/interrupt.rs
pub use bare_metal::{CriticalSection, Mutex, Nr};
use register::mstatus;
/// Disables all interrupts (clears `mstatus.MIE`).
#[inline]
pub unsafe fn disable() {
    match () {
        #[cfg(riscv)]
        () => mstatus::clear_mie(),
        #[cfg(not(riscv))]
        () => unimplemented!(),
    }
}
/// Enables all the interrupts (sets `mstatus.MIE`).
///
/// # Safety
///
/// - Do not call this function inside an `interrupt::free` critical section
#[inline]
pub unsafe fn enable() {
    match () {
        #[cfg(riscv)]
        () => mstatus::set_mie(),
        #[cfg(not(riscv))]
        () => unimplemented!(),
    }
}
/// Execute closure `f` in an interrupt-free context.
///
/// This is also known as a "critical section".
pub fn free<F, R>(f: F) -> R
where
    F: FnOnce(&CriticalSection) -> R,
{
    // Snapshot MIE first so nested `free` calls don't re-enable interrupts.
    let mstatus = mstatus::read();
    // disable interrupts
    unsafe {
        disable();
    }
    let r = f(unsafe { &CriticalSection::new() });
    // If the interrupts were active before our `disable` call, then re-enable
    // them. Otherwise, keep them disabled
    if mstatus.mie() {
        unsafe {
            enable();
        }
    }
    r
}

View File

@@ -1,27 +0,0 @@
//! Low level access to RISC-V processors
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate is guaranteed to compile on stable Rust 1.42 and up. It *might*
//! compile with older versions but that may change in any new patch release.
//!
//! # Features
//!
//! This crate provides:
//!
//! - Access to core registers like `mstatus` or `mcause`.
//! - Interrupt manipulation mechanisms.
//! - Wrappers around assembly instructions like `WFI`.
#![no_std]
#![cfg_attr(feature = "inline-asm", feature(asm_const))]
extern crate bare_metal;
#[macro_use]
extern crate bitflags;
extern crate bit_field;
pub mod addr;
pub mod asm;
pub mod interrupt;
pub mod paging;
pub mod register;

View File

@@ -1,40 +0,0 @@
//! Traits for abstracting away frame allocation and deallocation.
use addr::*;
/// A trait for types that can allocate a frame of memory.
pub trait FrameAllocatorFor<P: PhysicalAddress> {
    /// Allocate a frame of the appropriate size and return it if possible.
    fn alloc(&mut self) -> Option<FrameWith<P>>;
}
/// A trait for types that can deallocate a frame of memory.
pub trait FrameDeallocatorFor<P: PhysicalAddress> {
    /// Deallocate the given frame of memory.
    fn dealloc(&mut self, frame: FrameWith<P>);
}
/// Polyfill for default use cases.
///
/// `Frame`/`PhysAddr` are the crate-level aliases chosen by the `use_sv*!`
/// macros, so these traits let RISC-V callers ignore the address scheme.
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
pub trait FrameAllocator {
    /// Allocate a frame in the default address scheme, if possible.
    fn alloc(&mut self) -> Option<Frame>;
}
/// Deallocator counterpart of `FrameAllocator`.
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
pub trait FrameDeallocator {
    /// Deallocate the given frame.
    fn dealloc(&mut self, frame: Frame);
}
// Blanket impl: any scheme-agnostic allocator also works wherever a
// `FrameAllocatorFor<PhysAddr>` is required.
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
impl<T: FrameAllocator> FrameAllocatorFor<PhysAddr> for T {
    #[inline]
    fn alloc(&mut self) -> Option<Frame> {
        FrameAllocator::alloc(self)
    }
}
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
impl<T: FrameDeallocator> FrameDeallocatorFor<PhysAddr> for T {
    #[inline]
    fn dealloc(&mut self, frame: Frame) {
        FrameDeallocator::dealloc(self, frame)
    }
}

View File

@@ -1,136 +0,0 @@
use super::frame_alloc::*;
use super::page_table::*;
use addr::*;
/// Page-table manipulation interface, generic over the address scheme.
pub trait Mapper {
    /// Physical address type used by this page table.
    type P: PhysicalAddress;
    /// Virtual address type used by this page table.
    type V: VirtualAddress;
    /// TLB-flush promise returned by mutating operations.
    type MapperFlush: MapperFlushable;
    /// Page table entry type.
    type Entry: PTE;
    /// Creates a new mapping in the page table.
    ///
    /// This function might need additional physical frames to create new page tables. These
    /// frames are allocated from the `allocator` argument. At most three frames are required.
    fn map_to(
        &mut self,
        page: PageWith<Self::V>,
        frame: FrameWith<Self::P>,
        flags: PageTableFlags,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<Self::MapperFlush, MapToError>;
    /// Removes a mapping from the page table and returns the frame that used to be mapped.
    ///
    /// Note that no page tables or pages are deallocated.
    fn unmap(
        &mut self,
        page: PageWith<Self::V>,
    ) -> Result<(FrameWith<Self::P>, Self::MapperFlush), UnmapError<<Self as Mapper>::P>>;
    /// Get the reference of the specified `page` entry
    fn ref_entry(&mut self, page: PageWith<Self::V>) -> Result<&mut Self::Entry, FlagUpdateError>;
    /// Updates the flags of an existing mapping.
    fn update_flags(
        &mut self,
        page: PageWith<Self::V>,
        flags: PageTableFlags,
    ) -> Result<Self::MapperFlush, FlagUpdateError> {
        // Rewrites the entry in place, keeping its frame but replacing flags.
        self.ref_entry(page).map(|e| {
            e.set(e.frame::<Self::P>(), flags);
            Self::MapperFlush::new(page)
        })
    }
    /// Return the frame that the specified page is mapped to.
    fn translate_page(&mut self, page: PageWith<Self::V>) -> Option<FrameWith<Self::P>> {
        match self.ref_entry(page) {
            Ok(e) => {
                if e.is_unused() {
                    None
                } else {
                    Some(e.frame())
                }
            }
            Err(_) => None,
        }
    }
    /// Maps the given frame to the virtual page with the same address.
    fn identity_map(
        &mut self,
        frame: FrameWith<Self::P>,
        flags: PageTableFlags,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<Self::MapperFlush, MapToError> {
        let page = PageWith::of_addr(Self::V::new(frame.start_address().as_usize()));
        self.map_to(page, frame, flags, allocator)
    }
}
/// A deferred TLB flush for one page-table change.
pub trait MapperFlushable {
    /// Create a new flush promise
    fn new<T: VirtualAddress>(page: PageWith<T>) -> Self;
    /// Flush the page from the TLB to ensure that the newest mapping is used.
    fn flush(self);
    /// Don't flush the TLB and silence the “must be used” warning.
    fn ignore(self);
}
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush(usize);
impl MapperFlushable for MapperFlush {
    fn new<T: VirtualAddress>(page: PageWith<T>) -> Self {
        // Remember the page's start address for the targeted sfence.vma.
        MapperFlush(page.start_address().as_usize())
    }
    fn flush(self) {
        unsafe {
            crate::asm::sfence_vma(0, self.0);
        }
    }
    fn ignore(self) {}
}
/// This error is returned from `map_to` and similar methods.
#[derive(Debug)]
pub enum MapToError {
    /// An additional frame was needed for the mapping process, but the frame allocator
    /// returned `None`.
    FrameAllocationFailed,
    /// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
    /// given page is part of an already mapped huge page.
    ParentEntryHugePage,
    /// The given page is already mapped to a physical frame.
    PageAlreadyMapped,
}
/// An error indicating that an `unmap` call failed.
#[derive(Debug)]
pub enum UnmapError<P: PhysicalAddress> {
    /// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
    /// given page is part of a huge page and can't be freed individually.
    ParentEntryHugePage,
    /// The given page is not mapped to a physical frame.
    PageNotMapped,
    /// The page table entry for the given page points to an invalid physical address.
    InvalidFrameAddress(P),
}
/// An error indicating that an `update_flags` call failed.
#[derive(Debug)]
pub enum FlagUpdateError {
    /// The given page is not mapped to a physical frame.
    PageNotMapped,
}
/// Convenience aliases for a mapper's page and frame types.
pub trait MapperExt {
    type Page;
    type Frame;
}
impl<T: Mapper> MapperExt for T {
    type Page = PageWith<<T as Mapper>::V>;
    type Frame = FrameWith<<T as Mapper>::P>;
}

View File

@@ -1,13 +0,0 @@
mod frame_alloc;
mod mapper;
mod multi_level;
mod multi_level_x4;
mod page_table;
mod page_table_x4;
pub use self::frame_alloc::*;
pub use self::mapper::*;
pub use self::multi_level::*;
pub use self::multi_level_x4::*;
pub use self::page_table::*;
pub use self::page_table_x4::*;

View File

@@ -1,354 +0,0 @@
use super::frame_alloc::*;
use super::mapper::*;
use super::page_table::{PageTableFlags as F, *};
use crate::addr::*;
use core::marker::PhantomData;
/// This struct is a two level page table with `Mapper` trait implemented.
pub struct Rv32PageTableWith<'a, V: VirtualAddress + AddressL2, FL: MapperFlushable> {
    // Root (level-2) table, borrowed mutably for the mapper's lifetime.
    root_table: &'a mut PageTableX32,
    linear_offset: u64, // VA = PA + linear_offset
    phantom: PhantomData<(V, FL)>,
}
impl<'a, V: VirtualAddress + AddressL2, FL: MapperFlushable> Rv32PageTableWith<'a, V, FL> {
    /// Wrap an existing root table; `linear_offset` is the kernel's
    /// linear-map offset used to reach page-table frames by VA.
    pub fn new(table: &'a mut PageTableX32, linear_offset: usize) -> Self {
        Rv32PageTableWith {
            root_table: table,
            linear_offset: linear_offset as u64,
            phantom: PhantomData,
        }
    }
    /// Return the level-1 table for `p2_index`, allocating and zeroing a new
    /// frame for it if the root entry is empty.
    fn create_p1_if_not_exist(
        &mut self,
        p2_index: usize,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<&mut PageTableX32, MapToError> {
        if self.root_table[p2_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            // Intermediate entries carry only VALID (no R/W/X: non-leaf).
            self.root_table[p2_index].set(frame.clone(), F::VALID);
            let p1_table: &mut PageTableX32 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p1_table.zero();
            Ok(p1_table)
        } else {
            let frame = self.root_table[p2_index].frame::<PhysAddrSv32>();
            let p1_table: &mut PageTableX32 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            Ok(p1_table)
        }
    }
}
impl<'a, V: VirtualAddress + AddressL2, FL: MapperFlushable> Mapper
    for Rv32PageTableWith<'a, V, FL>
{
    type P = PhysAddrSv32;
    type V = V;
    type MapperFlush = FL;
    type Entry = PageTableEntryX32;
    fn map_to(
        &mut self,
        page: <Self as MapperExt>::Page,
        frame: <Self as MapperExt>::Frame,
        flags: PageTableFlags,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<Self::MapperFlush, MapToError> {
        let p1_table = self.create_p1_if_not_exist(page.p2_index(), allocator)?;
        // Refuse to overwrite an existing mapping.
        if !p1_table[page.p1_index()].is_unused() {
            return Err(MapToError::PageAlreadyMapped);
        }
        p1_table[page.p1_index()].set(frame, flags);
        Ok(Self::MapperFlush::new(page))
    }
    fn unmap(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<(<Self as MapperExt>::Frame, Self::MapperFlush), UnmapError<<Self as Mapper>::P>>
    {
        if self.root_table[page.p2_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p1_frame = self.root_table[page.p2_index()].frame::<PhysAddrSv32>();
        let p1_table: &mut PageTableX32 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        let p1_entry = &mut p1_table[page.p1_index()];
        if !p1_entry.flags().contains(F::VALID) {
            return Err(UnmapError::PageNotMapped);
        }
        // Capture the mapped frame before clearing the entry.
        let frame = p1_entry.frame();
        p1_entry.set_unused();
        Ok((frame, Self::MapperFlush::new(page)))
    }
    fn ref_entry(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<&mut PageTableEntryX32, FlagUpdateError> {
        if self.root_table[page.p2_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p1_frame = self.root_table[page.p2_index()].frame::<PhysAddrSv32>();
        let p1_table: &mut PageTableX32 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        Ok(&mut p1_table[page.p1_index()])
    }
}
/// This struct is a three level page table with `Mapper` trait implemented.
///
/// Sv39 variant of [`Rv32PageTableWith`]; tables are reached via the linear
/// mapping VA = PA + `linear_offset`.
pub struct Rv39PageTableWith<'a, V: VirtualAddress + AddressL3, FL: MapperFlushable> {
    root_table: &'a mut PageTableX64,
    linear_offset: u64, // VA = PA + linear_offset
    phantom: PhantomData<(V, FL)>,
}
impl<'a, V: VirtualAddress + AddressL3, FL: MapperFlushable> Rv39PageTableWith<'a, V, FL> {
    /// Creates a mapper over `table` using the linear mapping at `linear_offset`.
    pub fn new(table: &'a mut PageTableX64, linear_offset: usize) -> Self {
        Rv39PageTableWith {
            root_table: table,
            linear_offset: linear_offset as u64,
            phantom: PhantomData,
        }
    }
    /// Walks (and if needed creates) the level-2 and level-1 tables for the
    /// given indices, returning the level-1 table. Newly allocated
    /// intermediate tables are linked with VALID only and zeroed.
    fn create_p1_if_not_exist(
        &mut self,
        p3_index: usize,
        p2_index: usize,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<&mut PageTableX64, MapToError> {
        let p2_table = if self.root_table[p3_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            self.root_table[p3_index].set(frame.clone(), F::VALID);
            let p2_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p2_table.zero();
            p2_table
        } else {
            let frame = self.root_table[p3_index].frame::<PhysAddrSv39>();
            unsafe { frame.as_kernel_mut(self.linear_offset) }
        };
        if p2_table[p2_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            p2_table[p2_index].set(frame.clone(), F::VALID);
            let p1_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p1_table.zero();
            Ok(p1_table)
        } else {
            let frame = p2_table[p2_index].frame::<PhysAddrSv39>();
            let p1_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            Ok(p1_table)
        }
    }
}
impl<'a, V: VirtualAddress + AddressL3, FL: MapperFlushable> Mapper
    for Rv39PageTableWith<'a, V, FL>
{
    type P = PhysAddrSv39;
    type V = V;
    type MapperFlush = FL;
    type Entry = PageTableEntryX64;
    /// Maps `page` to `frame` with `flags`, creating intermediate tables on
    /// demand. Fails with `PageAlreadyMapped` if the leaf slot is occupied.
    fn map_to(
        &mut self,
        page: <Self as MapperExt>::Page,
        frame: <Self as MapperExt>::Frame,
        flags: PageTableFlags,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<Self::MapperFlush, MapToError> {
        let p1_table = self.create_p1_if_not_exist(page.p3_index(), page.p2_index(), allocator)?;
        if !p1_table[page.p1_index()].is_unused() {
            return Err(MapToError::PageAlreadyMapped);
        }
        p1_table[page.p1_index()].set(frame, flags);
        Ok(Self::MapperFlush::new(page))
    }
    /// Removes the leaf mapping for `page`. Intermediate tables are kept.
    fn unmap(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<(<Self as MapperExt>::Frame, Self::MapperFlush), UnmapError<<Self as Mapper>::P>>
    {
        // Walk level 3 -> 2 -> 1, bailing out at the first missing table.
        if self.root_table[page.p3_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p2_frame = self.root_table[page.p3_index()].frame::<PhysAddrSv39>();
        let p2_table: &mut PageTableX64 = unsafe { p2_frame.as_kernel_mut(self.linear_offset) };
        if p2_table[page.p2_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p1_frame = p2_table[page.p2_index()].frame::<PhysAddrSv39>();
        let p1_table: &mut PageTableX64 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        let p1_entry = &mut p1_table[page.p1_index()];
        if !p1_entry.flags().contains(F::VALID) {
            return Err(UnmapError::PageNotMapped);
        }
        let frame = p1_entry.frame();
        p1_entry.set_unused();
        Ok((frame, Self::MapperFlush::new(page)))
    }
    /// Returns a mutable reference to the leaf entry for `page`, without
    /// checking the leaf's own validity.
    fn ref_entry(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<&mut PageTableEntryX64, FlagUpdateError> {
        if self.root_table[page.p3_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p2_frame = self.root_table[page.p3_index()].frame::<PhysAddrSv39>();
        let p2_table: &mut PageTableX64 = unsafe { p2_frame.as_kernel_mut(self.linear_offset) };
        if p2_table[page.p2_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p1_frame = p2_table[page.p2_index()].frame::<PhysAddrSv39>();
        let p1_table: &mut PageTableX64 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        Ok(&mut p1_table[page.p1_index()])
    }
}
/// This struct is a four level page table with `Mapper` trait implemented.
///
/// Sv48 variant; tables are reached via the linear mapping
/// VA = PA + `linear_offset`.
pub struct Rv48PageTableWith<'a, V: VirtualAddress + AddressL4, FL: MapperFlushable> {
    root_table: &'a mut PageTableX64,
    linear_offset: u64, // VA = PA + linear_offset
    phantom: PhantomData<(V, FL)>,
}
impl<'a, V: VirtualAddress + AddressL4, FL: MapperFlushable> Rv48PageTableWith<'a, V, FL> {
    /// Creates a mapper over `table` using the linear mapping at `linear_offset`.
    pub fn new(table: &'a mut PageTableX64, linear_offset: usize) -> Self {
        Rv48PageTableWith {
            root_table: table,
            linear_offset: linear_offset as u64,
            phantom: PhantomData,
        }
    }
    /// Walks (and if needed creates) the level-3, level-2 and level-1 tables
    /// for the given indices, returning the level-1 table. Newly allocated
    /// intermediate tables are linked with VALID only and zeroed.
    fn create_p1_if_not_exist(
        &mut self,
        p4_index: usize,
        p3_index: usize,
        p2_index: usize,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<&mut PageTableX64, MapToError> {
        let p3_table = if self.root_table[p4_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            self.root_table[p4_index].set(frame.clone(), F::VALID);
            let p3_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p3_table.zero();
            p3_table
        } else {
            let frame = self.root_table[p4_index].frame::<PhysAddrSv48>();
            unsafe { frame.as_kernel_mut(self.linear_offset) }
        };
        let p2_table = if p3_table[p3_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            p3_table[p3_index].set(frame.clone(), F::VALID);
            let p2_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p2_table.zero();
            p2_table
        } else {
            let frame = p3_table[p3_index].frame::<PhysAddrSv48>();
            unsafe { frame.as_kernel_mut(self.linear_offset) }
        };
        if p2_table[p2_index].is_unused() {
            let frame = allocator.alloc().ok_or(MapToError::FrameAllocationFailed)?;
            p2_table[p2_index].set(frame.clone(), F::VALID);
            let p1_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            p1_table.zero();
            Ok(p1_table)
        } else {
            let frame = p2_table[p2_index].frame::<PhysAddrSv48>();
            let p1_table: &mut PageTableX64 = unsafe { frame.as_kernel_mut(self.linear_offset) };
            Ok(p1_table)
        }
    }
}
impl<'a, V: VirtualAddress + AddressL4, FL: MapperFlushable> Mapper
    for Rv48PageTableWith<'a, V, FL>
{
    type P = PhysAddrSv48;
    type V = V;
    type MapperFlush = FL;
    type Entry = PageTableEntryX64;
    /// Maps `page` to `frame` with `flags`, creating intermediate tables on
    /// demand. Fails with `PageAlreadyMapped` if the leaf slot is occupied.
    fn map_to(
        &mut self,
        page: <Self as MapperExt>::Page,
        frame: <Self as MapperExt>::Frame,
        flags: PageTableFlags,
        allocator: &mut impl FrameAllocatorFor<<Self as Mapper>::P>,
    ) -> Result<Self::MapperFlush, MapToError> {
        let p1_table = self.create_p1_if_not_exist(
            page.p4_index(),
            page.p3_index(),
            page.p2_index(),
            allocator,
        )?;
        if !p1_table[page.p1_index()].is_unused() {
            return Err(MapToError::PageAlreadyMapped);
        }
        p1_table[page.p1_index()].set(frame, flags);
        Ok(Self::MapperFlush::new(page))
    }
    /// Removes the leaf mapping for `page`. Intermediate tables are kept.
    fn unmap(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<(<Self as MapperExt>::Frame, Self::MapperFlush), UnmapError<<Self as Mapper>::P>>
    {
        // Walk level 4 -> 3 -> 2 -> 1, bailing out at the first missing table.
        if self.root_table[page.p4_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p3_frame = self.root_table[page.p4_index()].frame::<PhysAddrSv48>();
        let p3_table: &mut PageTableX64 = unsafe { p3_frame.as_kernel_mut(self.linear_offset) };
        if p3_table[page.p3_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p2_frame = p3_table[page.p3_index()].frame::<PhysAddrSv48>();
        let p2_table: &mut PageTableX64 = unsafe { p2_frame.as_kernel_mut(self.linear_offset) };
        if p2_table[page.p2_index()].is_unused() {
            return Err(UnmapError::PageNotMapped);
        }
        let p1_frame = p2_table[page.p2_index()].frame::<PhysAddrSv48>();
        let p1_table: &mut PageTableX64 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        let p1_entry = &mut p1_table[page.p1_index()];
        if !p1_entry.flags().contains(F::VALID) {
            return Err(UnmapError::PageNotMapped);
        }
        let frame = p1_entry.frame::<PhysAddrSv48>();
        p1_entry.set_unused();
        Ok((frame, Self::MapperFlush::new(page)))
    }
    /// Returns a mutable reference to the leaf entry for `page`, without
    /// checking the leaf's own validity.
    fn ref_entry(
        &mut self,
        page: <Self as MapperExt>::Page,
    ) -> Result<&mut PageTableEntryX64, FlagUpdateError> {
        if self.root_table[page.p4_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p3_frame = self.root_table[page.p4_index()].frame::<PhysAddrSv48>();
        let p3_table: &mut PageTableX64 = unsafe { p3_frame.as_kernel_mut(self.linear_offset) };
        if p3_table[page.p3_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p2_frame = p3_table[page.p3_index()].frame::<PhysAddrSv48>();
        let p2_table: &mut PageTableX64 = unsafe { p2_frame.as_kernel_mut(self.linear_offset) };
        if p2_table[page.p2_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }
        let p1_frame = p2_table[page.p2_index()].frame::<PhysAddrSv48>();
        let p1_table: &mut PageTableX64 = unsafe { p1_frame.as_kernel_mut(self.linear_offset) };
        Ok(&mut p1_table[page.p1_index()])
    }
}
/// Conventional host page tables: canonical virtual-address types with the
/// plain `sfence.vma`-based flush.
pub type Rv32PageTable<'a> = Rv32PageTableWith<'a, VirtAddrSv32, MapperFlush>;
pub type Rv39PageTable<'a> = Rv39PageTableWith<'a, VirtAddrSv39, MapperFlush>;
pub type Rv48PageTable<'a> = Rv48PageTableWith<'a, VirtAddrSv48, MapperFlush>;

View File

@@ -1,42 +0,0 @@
use crate::addr::*;
use crate::asm::{hfence_gvma, hfence_vvma};
use crate::paging::mapper::MapperFlushable;
use crate::paging::multi_level::Rv32PageTableWith;
use crate::paging::multi_level::{Rv39PageTableWith, Rv48PageTableWith};
/// A pending translation flush for a guest-physical-address mapping change.
///
/// Holds the start address of the affected page; `flush` issues an
/// `hfence.gvma` for it.
#[must_use = "Guest Physical Address Table changes must be flushed or ignored."]
pub struct MapperFlushGPA(usize);
impl MapperFlushGPA {}
impl MapperFlushable for MapperFlushGPA {
    fn new<T: VirtualAddress>(page: PageWith<T>) -> Self {
        let start = page.start_address().as_usize();
        Self(start)
    }
    fn flush(self) {
        unsafe { hfence_gvma(self.0, 0) }
    }
    fn ignore(self) {}
}
/// A pending translation flush for a guest (VS-level) page-table change.
///
/// Holds the start address of the affected page; `flush` issues an
/// `hfence.vvma` for it.
#[must_use = "Guest Page Table changes must be flushed or ignored."]
pub struct MapperFlushGPT(usize);
impl MapperFlushable for MapperFlushGPT {
    fn new<T: VirtualAddress>(page: PageWith<T>) -> Self {
        let start = page.start_address().as_usize();
        Self(start)
    }
    fn flush(self) {
        unsafe { hfence_vvma(self.0, 0) }
    }
    fn ignore(self) {}
}
// X4 variants: second-stage (GPA -> PA) tables with a 4x-wide root, flushed
// with hfence.gvma.
pub type Rv32PageTableX4<'a> = Rv32PageTableWith<'a, GPAddrSv32X4, MapperFlushGPA>;
pub type Rv39PageTableX4<'a> = Rv39PageTableWith<'a, GPAddrSv39X4, MapperFlushGPA>;
pub type Rv48PageTableX4<'a> = Rv48PageTableWith<'a, GPAddrSv48X4, MapperFlushGPA>;
// Guest variants: first-stage (guest VA) tables, flushed with hfence.vvma.
pub type Rv32PageTableGuest<'a> = Rv32PageTableWith<'a, VirtAddrSv32, MapperFlushGPT>;
pub type Rv39PageTableGuest<'a> = Rv39PageTableWith<'a, VirtAddrSv39, MapperFlushGPT>;
pub type Rv48PageTableGuest<'a> = Rv48PageTableWith<'a, VirtAddrSv48, MapperFlushGPT>;

View File

@@ -1,252 +0,0 @@
use addr::*;
use core::convert::TryInto;
use core::fmt::{Debug, Error, Formatter};
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
// Fixed-size entry arrays for 32-bit (1024-entry) and 64-bit (512-entry) tables.
pub type Entries32 = [PageTableEntryX32; RV32_ENTRY_COUNT];
pub type Entries64 = [PageTableEntryX64; RV64_ENTRY_COUNT];
// To avoid const generic.
/// Abstracts over entry arrays of different lengths so `PageTableWith` can be
/// generic without const generics.
pub trait PTEIterableSlice<T> {
    fn to_pte_slice<'a>(&'a self) -> &'a [T];
    fn to_pte_slice_mut<'a>(&'a mut self) -> &'a mut [T];
    fn pte_index(&self, index: usize) -> &T;
    fn pte_index_mut(&mut self, index: usize) -> &mut T;
}
impl PTEIterableSlice<PageTableEntryX32> for Entries32 {
    fn to_pte_slice(&self) -> &[PageTableEntryX32] {
        &self[..]
    }
    fn to_pte_slice_mut(&mut self) -> &mut [PageTableEntryX32] {
        &mut self[..]
    }
    fn pte_index(&self, index: usize) -> &PageTableEntryX32 {
        &self.to_pte_slice()[index]
    }
    fn pte_index_mut(&mut self, index: usize) -> &mut PageTableEntryX32 {
        &mut self.to_pte_slice_mut()[index]
    }
}
impl PTEIterableSlice<PageTableEntryX64> for Entries64 {
    fn to_pte_slice(&self) -> &[PageTableEntryX64] {
        &self[..]
    }
    fn to_pte_slice_mut(&mut self) -> &mut [PageTableEntryX64] {
        &mut self[..]
    }
    fn pte_index(&self, index: usize) -> &PageTableEntryX64 {
        &self.to_pte_slice()[index]
    }
    fn pte_index_mut(&mut self, index: usize) -> &mut PageTableEntryX64 {
        &mut self.to_pte_slice_mut()[index]
    }
}
/// An in-memory page table: a raw array of PTEs (`repr(C)` so it matches the
/// hardware layout exactly).
#[repr(C)]
pub struct PageTableWith<T: PTEIterableSlice<E>, E: PTE> {
    entries: T,
    phantom: PhantomData<E>,
}
impl<T: PTEIterableSlice<E>, E: PTE> PageTableWith<T, E> {
    /// Clears all entries.
    pub fn zero(&mut self) {
        self.entries
            .to_pte_slice_mut()
            .iter_mut()
            .for_each(E::set_unused);
    }
}
// Index straight into the entry array; panics on out-of-range indices just
// like slice indexing.
impl<T: PTEIterableSlice<E>, E: PTE> Index<usize> for PageTableWith<T, E> {
    type Output = E;
    fn index(&self, index: usize) -> &Self::Output {
        self.entries.pte_index(index)
    }
}
impl<T: PTEIterableSlice<E>, E: PTE> IndexMut<usize> for PageTableWith<T, E> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.entries.pte_index_mut(index)
    }
}
// Debug-print as a map of {index: entry}, skipping unused entries to keep
// the output readable.
impl<T: PTEIterableSlice<E>, E: PTE + Debug> Debug for PageTableWith<T, E> {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        f.debug_map()
            .entries(
                self.entries
                    .to_pte_slice()
                    .iter()
                    .enumerate()
                    .filter(|p| !p.1.is_unused()),
            )
            .finish()
    }
}
/// Common interface of 32-bit and 64-bit RISC-V page-table entries.
pub trait PTE {
    /// True iff the raw entry is all-zero.
    fn is_unused(&self) -> bool;
    /// Zeroes the entry.
    fn set_unused(&mut self);
    /// Flag bits (low bits of the entry).
    fn flags(&self) -> PageTableFlags;
    /// Physical page number as `usize`; may not fit on narrow targets.
    fn ppn(&self) -> usize;
    /// Physical page number, widened to `u64`.
    fn ppn_u64(&self) -> u64;
    /// Physical address of the target page (PPN << 12).
    fn addr<T: PhysicalAddress>(&self) -> T;
    /// Target frame, derived from `addr`.
    fn frame<T: PhysicalAddress>(&self) -> FrameWith<T>;
    /// Points the entry at `frame` with `flags`.
    fn set<T: PhysicalAddress>(&mut self, frame: FrameWith<T>, flags: PageTableFlags);
    /// In-place view of the flag bits.
    fn flags_mut(&mut self) -> &mut PageTableFlags;
}
/// A 32-bit (Sv32) page-table entry: PPN in bits 31:10, flags in bits 9:0.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct PageTableEntryX32(u32);
impl PTE for PageTableEntryX32 {
    fn is_unused(&self) -> bool {
        self.0 == 0
    }
    fn set_unused(&mut self) {
        self.0 = 0;
    }
    fn flags(&self) -> PageTableFlags {
        PageTableFlags::from_bits_truncate(self.0 as usize)
    }
    fn ppn(&self) -> usize {
        // A 22-bit Sv32 PPN always fits in usize on 32/64-bit targets.
        self.ppn_u64().try_into().unwrap()
    }
    fn ppn_u64(&self) -> u64 {
        (self.0 >> 10) as u64
    }
    fn addr<T: PhysicalAddress>(&self) -> T {
        T::new_u64((self.ppn() as u64) << 12)
    }
    fn frame<T: PhysicalAddress>(&self) -> FrameWith<T> {
        FrameWith::of_addr(self.addr())
    }
    fn set<T: PhysicalAddress>(&mut self, frame: FrameWith<T>, mut flags: PageTableFlags) {
        // U540 will raise page fault when accessing page with A=0 or D=0
        flags |= EF::ACCESSED | EF::DIRTY;
        self.0 = ((frame.number() << 10) | flags.bits()) as u32;
    }
    fn flags_mut(&mut self) -> &mut PageTableFlags {
        // NOTE(review): reinterprets the u32 entry as PageTableFlags (usize);
        // relies on the flag bits living in the low byte — confirm layout.
        unsafe { &mut *(self as *mut _ as *mut PageTableFlags) }
    }
}
// Sv32 is the only 32-bit mode, so the Sv32 physical-address type is fixed here.
impl Debug for PageTableEntryX32 {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        f.debug_struct("PageTableEntryX32")
            .field("frame", &self.frame::<PhysAddrSv32>())
            .field("flags", &self.flags())
            .finish()
    }
}
/// A 64-bit (Sv39/Sv48) page-table entry: PPN in bits 53:10, flags in bits 9:0.
#[derive(Copy, Clone)]
pub struct PageTableEntryX64(u64);
impl PTE for PageTableEntryX64 {
    fn is_unused(&self) -> bool {
        self.0 == 0
    }
    fn set_unused(&mut self) {
        self.0 = 0;
    }
    fn flags(&self) -> PageTableFlags {
        PageTableFlags::from_bits_truncate(self.0 as usize)
    }
    fn ppn(&self) -> usize {
        // May panic on targets where usize is narrower than the 44-bit PPN.
        self.ppn_u64().try_into().unwrap()
    }
    fn ppn_u64(&self) -> u64 {
        self.0 >> 10
    }
    fn addr<T: PhysicalAddress>(&self) -> T {
        // Stay in u64 end-to-end: routing through `ppn()` (usize) would panic
        // via `try_into().unwrap()` on 32-bit hosts for PPNs >= 2^20.
        T::new_u64(self.ppn_u64() << 12)
    }
    fn frame<T: PhysicalAddress>(&self) -> FrameWith<T> {
        FrameWith::of_addr(self.addr())
    }
    fn set<T: PhysicalAddress>(&mut self, frame: FrameWith<T>, mut flags: PageTableFlags) {
        // U540 will raise page fault when accessing page with A=0 or D=0
        flags |= EF::ACCESSED | EF::DIRTY;
        self.0 = ((frame.number() << 10) | flags.bits()) as u64;
    }
    fn flags_mut(&mut self) -> &mut PageTableFlags {
        // NOTE(review): reinterprets the u64 entry as PageTableFlags (usize);
        // only sound where usize is 64-bit and flags live in the low bits.
        unsafe { &mut *(self as *mut _ as *mut PageTableFlags) }
    }
}
/// Debug adaptor: prints a 64-bit entry with its frame decoded under a
/// specific physical-address scheme `P` (Sv39 or Sv48).
pub struct PageTableEntryX64Printer<'a, P: PhysicalAddress>(
    &'a PageTableEntryX64,
    PhantomData<*const P>,
);
impl<'a, P: PhysicalAddress> Debug for PageTableEntryX64Printer<'a, P> {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        f.debug_struct("PageTableEntryX64")
            .field("frame", &self.0.frame::<P>())
            .field("flags", &self.0.flags())
            .finish()
    }
}
impl PageTableEntryX64 {
    /// Debug view with the frame decoded as an Sv39 physical address.
    pub fn debug_sv39<'a>(&'a self) -> PageTableEntryX64Printer<'a, PhysAddrSv39> {
        PageTableEntryX64Printer(self, PhantomData)
    }
    /// Debug view with the frame decoded as an Sv48 physical address.
    pub fn debug_sv48<'a>(&'a self) -> PageTableEntryX64Printer<'a, PhysAddrSv48> {
        PageTableEntryX64Printer(self, PhantomData)
    }
}
// Default Debug uses the wider Sv48 interpretation.
impl Debug for PageTableEntryX64 {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        self.debug_sv48().fmt(f)
    }
}
// 64-bit tables have 512 8-byte entries; 32-bit tables 1024 4-byte entries
// (both one 4 KiB page).
pub const RV64_ENTRY_COUNT: usize = 1 << 9;
pub const RV32_ENTRY_COUNT: usize = 1 << 10;
// Target-native aliases selected by custom cfg keys set by the build script.
#[cfg(riscv64)]
pub const ENTRY_COUNT: usize = RV64_ENTRY_COUNT;
#[cfg(riscv32)]
pub const ENTRY_COUNT: usize = RV32_ENTRY_COUNT;
#[cfg(riscv64)]
pub type PageTableEntry = PageTableEntryX64;
#[cfg(riscv32)]
pub type PageTableEntry = PageTableEntryX32;
#[cfg(riscv64)]
pub type Entries = Entries64;
#[cfg(riscv32)]
pub type Entries = Entries32;
// Host/test fallback. NOTE(review): ENTRY_COUNT = 1 here disagrees with
// Entries = Entries64 (512 entries), and no fallback PageTableEntry/PageTable
// alias is provided — confirm this is intentional for non-RISC-V builds.
#[cfg(not(any(riscv32, riscv64)))]
pub const ENTRY_COUNT: usize = 1 << 0;
#[cfg(not(any(riscv32, riscv64)))]
pub type Entries = Entries64;
pub type PageTableX32 = PageTableWith<Entries32, PageTableEntryX32>;
pub type PageTableX64 = PageTableWith<Entries64, PageTableEntryX64>;
#[cfg(riscv64)]
pub type PageTable = PageTableX64;
#[cfg(riscv32)]
pub type PageTable = PageTableX32;
bitflags! {
    /// Possible flags for a page table entry.
    ///
    /// Bit layout follows the RISC-V PTE low bits: V/R/W/X/U/G/A/D plus the
    /// two software-reserved (RSW) bits.
    pub struct PageTableFlags: usize {
        const VALID = 1 << 0;
        const READABLE = 1 << 1;
        const WRITABLE = 1 << 2;
        const EXECUTABLE = 1 << 3;
        const USER = 1 << 4;
        const GLOBAL = 1 << 5;
        const ACCESSED = 1 << 6;
        const DIRTY = 1 << 7;
        const RESERVED1 = 1 << 8;
        const RESERVED2 = 1 << 9;
    }
}
// Short local alias used by the entry setters.
type EF = PageTableFlags;

View File

@@ -1,44 +0,0 @@
//! This file is for Hypervisor-related x4 page tables, including Sv32x4, Sv39x4 and Sv48x4.
//! In fact, these x4 page tables are Phys-to-Phys page tables from GPAs to real PAs.
use super::page_table::{
PTEIterableSlice, PageTableEntryX32, PageTableEntryX64, PageTableWith, RV32_ENTRY_COUNT,
RV64_ENTRY_COUNT,
};
// The root page table is 4 times larger.
// (The hypervisor extension widens the root by two guest-physical address
// bits, quadrupling its entry count; non-root levels keep the normal size.)
pub const RV32_X4_ENTRY_COUNT: usize = RV32_ENTRY_COUNT << 2;
pub const RV64_X4_ENTRY_COUNT: usize = RV64_ENTRY_COUNT << 2;
pub type Entries32X4 = [PageTableEntryX32; RV32_X4_ENTRY_COUNT];
pub type Entries64X4 = [PageTableEntryX64; RV64_X4_ENTRY_COUNT];
impl PTEIterableSlice<PageTableEntryX32> for Entries32X4 {
    fn to_pte_slice(&self) -> &[PageTableEntryX32] {
        &self[..]
    }
    fn to_pte_slice_mut(&mut self) -> &mut [PageTableEntryX32] {
        &mut self[..]
    }
    fn pte_index(&self, index: usize) -> &PageTableEntryX32 {
        &self.to_pte_slice()[index]
    }
    fn pte_index_mut(&mut self, index: usize) -> &mut PageTableEntryX32 {
        &mut self.to_pte_slice_mut()[index]
    }
}
impl PTEIterableSlice<PageTableEntryX64> for Entries64X4 {
    fn to_pte_slice(&self) -> &[PageTableEntryX64] {
        &self[..]
    }
    fn to_pte_slice_mut(&mut self) -> &mut [PageTableEntryX64] {
        &mut self[..]
    }
    fn pte_index(&self, index: usize) -> &PageTableEntryX64 {
        &self.to_pte_slice()[index]
    }
    fn pte_index_mut(&mut self, index: usize) -> &mut PageTableEntryX64 {
        &mut self.to_pte_slice_mut()[index]
    }
}
// Root-level tables with the widened (x4) entry arrays.
pub type PageTable32X4 = PageTableWith<Entries32X4, PageTableEntryX32>;
pub type PageTable64X4 = PageTableWith<Entries64X4, PageTableEntryX64>;

View File

@@ -1,134 +0,0 @@
//! Floating-point control and status register
use bit_field::BitField;
/// Floating-point control and status register
/// Floating-point control and status register
///
/// Snapshot of the `fcsr` CSR: `fflags` in bits 4:0, `frm` in bits 7:5.
#[derive(Clone, Copy, Debug)]
pub struct FCSR {
    bits: u32,
}
/// Accrued Exception Flags
#[derive(Clone, Copy, Debug)]
pub struct Flags(u32);
/// Accrued Exception Flag
#[derive(Clone, Copy, Debug)]
pub enum Flag {
    /// Inexact
    NX = 0b00001,
    /// Underflow
    UF = 0b00010,
    /// Overflow
    OF = 0b00100,
    /// Divide by Zero
    DZ = 0b01000,
    /// Invalid Operation
    NV = 0b10000,
}
impl Flags {
    /// Inexact
    #[inline]
    pub fn nx(&self) -> bool {
        self.0 & (Flag::NX as u32) != 0
    }
    /// Underflow
    #[inline]
    pub fn uf(&self) -> bool {
        self.0 & (Flag::UF as u32) != 0
    }
    /// Overflow
    #[inline]
    pub fn of(&self) -> bool {
        self.0 & (Flag::OF as u32) != 0
    }
    /// Divide by Zero
    #[inline]
    pub fn dz(&self) -> bool {
        self.0 & (Flag::DZ as u32) != 0
    }
    /// Invalid Operation
    #[inline]
    pub fn nv(&self) -> bool {
        self.0 & (Flag::NV as u32) != 0
    }
}
/// Rounding Mode
///
/// Values match the 3-bit `frm` field encoding; `Invalid` covers the
/// reserved encodings (0b101, 0b110, 0b111).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RoundingMode {
    RoundToNearestEven = 0b000,
    RoundTowardsZero = 0b001,
    RoundDown = 0b010,
    RoundUp = 0b011,
    RoundToNearestMaxMagnitude = 0b100,
    Invalid = 0b111,
}
impl FCSR {
    /// Returns the contents of the register as raw bits
    pub fn bits(&self) -> u32 {
        self.bits
    }
    /// Accrued Exception Flags (`fflags`, bits 4:0)
    #[inline]
    pub fn fflags(&self) -> Flags {
        Flags(self.bits & 0x1f)
    }
    /// Rounding Mode (`frm`, bits 7:5)
    #[inline]
    pub fn frm(&self) -> RoundingMode {
        let frm = (self.bits >> 5) & 0b111;
        match frm {
            0b000 => RoundingMode::RoundToNearestEven,
            0b001 => RoundingMode::RoundTowardsZero,
            0b010 => RoundingMode::RoundDown,
            0b011 => RoundingMode::RoundUp,
            0b100 => RoundingMode::RoundToNearestMaxMagnitude,
            _ => RoundingMode::Invalid,
        }
    }
}
// Generate raw `_read`/`_write`/`_clear` accessors for CSR 0x003 (fcsr).
read_csr!(0x003, __read_fcsr);
write_csr!(0x003, __write_fcsr);
clear!(0x003, __clear_fcsr);
/// Reads the CSR
#[inline]
pub fn read() -> FCSR {
    FCSR {
        bits: unsafe { _read() as u32 },
    }
}
/// Writes the CSR
///
/// Replaces `frm` while preserving the current `fflags` value.
#[inline]
pub unsafe fn set_rounding_mode(frm: RoundingMode) {
    let old = read();
    let bits = ((frm as u32) << 5) | old.fflags().0;
    _write(bits as usize);
}
/// Resets `fflags` field bits
#[inline]
pub unsafe fn clear_flags() {
    // Clear all five exception flags (bits 4:0) at once.
    let mask = 0b11111;
    _clear(mask);
}
/// Resets `fflags` field bit
#[inline]
pub unsafe fn clear_flag(flag: Flag) {
    // Each Flag discriminant is a one-hot mask, so it can be used directly.
    _clear(flag as usize);
}

View File

@@ -1,82 +0,0 @@
// Generates the low-half module for one hpmcounter CSR: a raw read plus a
// 64-bit composite read that combines with the matching high-half module.
// NOTE(review): `$writef` is accepted but never used — these counters are
// read-only from here.
macro_rules! reg {
    (
        $addr:expr, $csrl:ident, $csrh:ident, $readf:ident, $writef:ident
    ) => {
        /// Performance-monitoring counter
        pub mod $csrl {
            read_csr_as_usize!($addr, $readf);
            read_composite_csr!(super::$csrh::read(), read());
        }
    }
}
// Generates the high-half module for one hpmcounter (RV32 only).
// NOTE(review): `$writef` is accepted but never used.
macro_rules! regh {
    (
        $addr:expr, $csrh:ident, $readf:ident, $writef:ident
    ) => {
        /// Upper 32 bits of performance-monitoring counter (RV32I only)
        pub mod $csrh {
            read_csr_as_usize_rv32!($addr, $readf);
        }
    }
}
// hpmcounter3..31 live at CSRs 0xC03..0xC1F; their RV32 high halves at
// 0xC83..0xC9F.
reg!(0xC03, hpmcounter3, hpmcounter3h, __read_hpmcounter3, __write_hpmcounter3);
reg!(0xC04, hpmcounter4, hpmcounter4h, __read_hpmcounter4, __write_hpmcounter4);
reg!(0xC05, hpmcounter5, hpmcounter5h, __read_hpmcounter5, __write_hpmcounter5);
reg!(0xC06, hpmcounter6, hpmcounter6h, __read_hpmcounter6, __write_hpmcounter6);
reg!(0xC07, hpmcounter7, hpmcounter7h, __read_hpmcounter7, __write_hpmcounter7);
reg!(0xC08, hpmcounter8, hpmcounter8h, __read_hpmcounter8, __write_hpmcounter8);
reg!(0xC09, hpmcounter9, hpmcounter9h, __read_hpmcounter9, __write_hpmcounter9);
reg!(0xC0A, hpmcounter10, hpmcounter10h, __read_hpmcounter10, __write_hpmcounter10);
reg!(0xC0B, hpmcounter11, hpmcounter11h, __read_hpmcounter11, __write_hpmcounter11);
reg!(0xC0C, hpmcounter12, hpmcounter12h, __read_hpmcounter12, __write_hpmcounter12);
reg!(0xC0D, hpmcounter13, hpmcounter13h, __read_hpmcounter13, __write_hpmcounter13);
reg!(0xC0E, hpmcounter14, hpmcounter14h, __read_hpmcounter14, __write_hpmcounter14);
reg!(0xC0F, hpmcounter15, hpmcounter15h, __read_hpmcounter15, __write_hpmcounter15);
reg!(0xC10, hpmcounter16, hpmcounter16h, __read_hpmcounter16, __write_hpmcounter16);
reg!(0xC11, hpmcounter17, hpmcounter17h, __read_hpmcounter17, __write_hpmcounter17);
reg!(0xC12, hpmcounter18, hpmcounter18h, __read_hpmcounter18, __write_hpmcounter18);
reg!(0xC13, hpmcounter19, hpmcounter19h, __read_hpmcounter19, __write_hpmcounter19);
reg!(0xC14, hpmcounter20, hpmcounter20h, __read_hpmcounter20, __write_hpmcounter20);
reg!(0xC15, hpmcounter21, hpmcounter21h, __read_hpmcounter21, __write_hpmcounter21);
reg!(0xC16, hpmcounter22, hpmcounter22h, __read_hpmcounter22, __write_hpmcounter22);
reg!(0xC17, hpmcounter23, hpmcounter23h, __read_hpmcounter23, __write_hpmcounter23);
reg!(0xC18, hpmcounter24, hpmcounter24h, __read_hpmcounter24, __write_hpmcounter24);
reg!(0xC19, hpmcounter25, hpmcounter25h, __read_hpmcounter25, __write_hpmcounter25);
reg!(0xC1A, hpmcounter26, hpmcounter26h, __read_hpmcounter26, __write_hpmcounter26);
reg!(0xC1B, hpmcounter27, hpmcounter27h, __read_hpmcounter27, __write_hpmcounter27);
reg!(0xC1C, hpmcounter28, hpmcounter28h, __read_hpmcounter28, __write_hpmcounter28);
reg!(0xC1D, hpmcounter29, hpmcounter29h, __read_hpmcounter29, __write_hpmcounter29);
reg!(0xC1E, hpmcounter30, hpmcounter30h, __read_hpmcounter30, __write_hpmcounter30);
reg!(0xC1F, hpmcounter31, hpmcounter31h, __read_hpmcounter31, __write_hpmcounter31);
regh!(0xC83, hpmcounter3h, __read_hpmcounter3h, __write_hpmcounter3h);
regh!(0xC84, hpmcounter4h, __read_hpmcounter4h, __write_hpmcounter4h);
regh!(0xC85, hpmcounter5h, __read_hpmcounter5h, __write_hpmcounter5h);
regh!(0xC86, hpmcounter6h, __read_hpmcounter6h, __write_hpmcounter6h);
regh!(0xC87, hpmcounter7h, __read_hpmcounter7h, __write_hpmcounter7h);
regh!(0xC88, hpmcounter8h, __read_hpmcounter8h, __write_hpmcounter8h);
regh!(0xC89, hpmcounter9h, __read_hpmcounter9h, __write_hpmcounter9h);
regh!(0xC8A, hpmcounter10h, __read_hpmcounter10h, __write_hpmcounter10h);
regh!(0xC8B, hpmcounter11h, __read_hpmcounter11h, __write_hpmcounter11h);
regh!(0xC8C, hpmcounter12h, __read_hpmcounter12h, __write_hpmcounter12h);
regh!(0xC8D, hpmcounter13h, __read_hpmcounter13h, __write_hpmcounter13h);
regh!(0xC8E, hpmcounter14h, __read_hpmcounter14h, __write_hpmcounter14h);
regh!(0xC8F, hpmcounter15h, __read_hpmcounter15h, __write_hpmcounter15h);
regh!(0xC90, hpmcounter16h, __read_hpmcounter16h, __write_hpmcounter16h);
regh!(0xC91, hpmcounter17h, __read_hpmcounter17h, __write_hpmcounter17h);
regh!(0xC92, hpmcounter18h, __read_hpmcounter18h, __write_hpmcounter18h);
regh!(0xC93, hpmcounter19h, __read_hpmcounter19h, __write_hpmcounter19h);
regh!(0xC94, hpmcounter20h, __read_hpmcounter20h, __write_hpmcounter20h);
regh!(0xC95, hpmcounter21h, __read_hpmcounter21h, __write_hpmcounter21h);
regh!(0xC96, hpmcounter22h, __read_hpmcounter22h, __write_hpmcounter22h);
regh!(0xC97, hpmcounter23h, __read_hpmcounter23h, __write_hpmcounter23h);
regh!(0xC98, hpmcounter24h, __read_hpmcounter24h, __write_hpmcounter24h);
regh!(0xC99, hpmcounter25h, __read_hpmcounter25h, __write_hpmcounter25h);
regh!(0xC9A, hpmcounter26h, __read_hpmcounter26h, __write_hpmcounter26h);
regh!(0xC9B, hpmcounter27h, __read_hpmcounter27h, __write_hpmcounter27h);
regh!(0xC9C, hpmcounter28h, __read_hpmcounter28h, __write_hpmcounter28h);
regh!(0xC9D, hpmcounter29h, __read_hpmcounter29h, __write_hpmcounter29h);
regh!(0xC9E, hpmcounter30h, __read_hpmcounter30h, __write_hpmcounter30h);
regh!(0xC9F, hpmcounter31h, __read_hpmcounter31h, __write_hpmcounter31h);

View File

@@ -1,413 +0,0 @@
//! Hypervisor Counter-Enable Register (`hcounteren`).
use bit_field::BitField;
/// In-memory copy of the `hcounteren` CSR: one enable bit per hardware
/// performance counter (CY, TM, IR, HPM3..HPM31).
#[derive(Copy, Clone, Debug)]
pub struct Hcounteren {
    bits: usize,
}
/// Generates the getter/setter pair for a single `hcounteren` bit.
/// Keeps the accessor set identical to the previous hand-written methods
/// while eliminating ~300 lines of copy-paste.
macro_rules! hcounteren_bit {
    ($bit:expr, $get:ident, $set:ident) => {
        /// Returns whether this counter-enable bit is set.
        #[inline]
        pub fn $get(&self) -> bool {
            (self.bits >> $bit) & 1 != 0
        }
        /// Sets or clears this counter-enable bit in the cached value.
        #[inline]
        pub fn $set(&mut self, val: bool) {
            if val {
                self.bits |= 1 << $bit;
            } else {
                self.bits &= !(1 << $bit);
            }
        }
    };
}
impl Hcounteren {
    /// Returns the raw register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Builds a value from raw bits (no validation is performed).
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Hcounteren { bits: x }
    }
    /// Writes the cached value back to the CSR.
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    // Bit 0: cycle counter, bit 1: time, bit 2: instret.
    hcounteren_bit!(0, cy, set_cy);
    hcounteren_bit!(1, tm, set_tm);
    hcounteren_bit!(2, ir, set_ir);
    // Bits 3..=31: hpmcounter3..hpmcounter31 enables.
    hcounteren_bit!(3, hpm3, set_hpm3);
    hcounteren_bit!(4, hpm4, set_hpm4);
    hcounteren_bit!(5, hpm5, set_hpm5);
    hcounteren_bit!(6, hpm6, set_hpm6);
    hcounteren_bit!(7, hpm7, set_hpm7);
    hcounteren_bit!(8, hpm8, set_hpm8);
    hcounteren_bit!(9, hpm9, set_hpm9);
    hcounteren_bit!(10, hpm10, set_hpm10);
    hcounteren_bit!(11, hpm11, set_hpm11);
    hcounteren_bit!(12, hpm12, set_hpm12);
    hcounteren_bit!(13, hpm13, set_hpm13);
    hcounteren_bit!(14, hpm14, set_hpm14);
    hcounteren_bit!(15, hpm15, set_hpm15);
    hcounteren_bit!(16, hpm16, set_hpm16);
    hcounteren_bit!(17, hpm17, set_hpm17);
    hcounteren_bit!(18, hpm18, set_hpm18);
    hcounteren_bit!(19, hpm19, set_hpm19);
    hcounteren_bit!(20, hpm20, set_hpm20);
    hcounteren_bit!(21, hpm21, set_hpm21);
    hcounteren_bit!(22, hpm22, set_hpm22);
    hcounteren_bit!(23, hpm23, set_hpm23);
    hcounteren_bit!(24, hpm24, set_hpm24);
    hcounteren_bit!(25, hpm25, set_hpm25);
    hcounteren_bit!(26, hpm26, set_hpm26);
    hcounteren_bit!(27, hpm27, set_hpm27);
    hcounteren_bit!(28, hpm28, set_hpm28);
    hcounteren_bit!(29, hpm29, set_hpm29);
    hcounteren_bit!(30, hpm30, set_hpm30);
    hcounteren_bit!(31, hpm31, set_hpm31);
}
// Generate raw CSR accessors (_read/_write/_set/_clear).
// NOTE(review): the hcounteren CSR is 0x606 (= 1542 decimal); confirm that
// the literal 3602 below is the intended encoding for these macros.
read_csr_as!(Hcounteren, 3602, __read_hcounteren);
write_csr!(3602, __write_hcounteren);
set!(3602, __set_hcounteren);
clear!(3602, __clear_hcounteren);
// bit ops
// Module-level set_X/clear_X helpers that read-modify-write the CSR directly,
// one pair per counter-enable bit.
set_clear_csr!(
///
, set_cy, clear_cy, 1 << 0);
set_clear_csr!(
///
, set_tm, clear_tm, 1 << 1);
set_clear_csr!(
///
, set_ir, clear_ir, 1 << 2);
set_clear_csr!(
///
, set_hpm3, clear_hpm3, 1 << 3);
set_clear_csr!(
///
, set_hpm4, clear_hpm4, 1 << 4);
set_clear_csr!(
///
, set_hpm5, clear_hpm5, 1 << 5);
set_clear_csr!(
///
, set_hpm6, clear_hpm6, 1 << 6);
set_clear_csr!(
///
, set_hpm7, clear_hpm7, 1 << 7);
set_clear_csr!(
///
, set_hpm8, clear_hpm8, 1 << 8);
set_clear_csr!(
///
, set_hpm9, clear_hpm9, 1 << 9);
set_clear_csr!(
///
, set_hpm10, clear_hpm10, 1 << 10);
set_clear_csr!(
///
, set_hpm11, clear_hpm11, 1 << 11);
set_clear_csr!(
///
, set_hpm12, clear_hpm12, 1 << 12);
set_clear_csr!(
///
, set_hpm13, clear_hpm13, 1 << 13);
set_clear_csr!(
///
, set_hpm14, clear_hpm14, 1 << 14);
set_clear_csr!(
///
, set_hpm15, clear_hpm15, 1 << 15);
set_clear_csr!(
///
, set_hpm16, clear_hpm16, 1 << 16);
set_clear_csr!(
///
, set_hpm17, clear_hpm17, 1 << 17);
set_clear_csr!(
///
, set_hpm18, clear_hpm18, 1 << 18);
set_clear_csr!(
///
, set_hpm19, clear_hpm19, 1 << 19);
set_clear_csr!(
///
, set_hpm20, clear_hpm20, 1 << 20);
set_clear_csr!(
///
, set_hpm21, clear_hpm21, 1 << 21);
set_clear_csr!(
///
, set_hpm22, clear_hpm22, 1 << 22);
set_clear_csr!(
///
, set_hpm23, clear_hpm23, 1 << 23);
set_clear_csr!(
///
, set_hpm24, clear_hpm24, 1 << 24);
set_clear_csr!(
///
, set_hpm25, clear_hpm25, 1 << 25);
set_clear_csr!(
///
, set_hpm26, clear_hpm26, 1 << 26);
set_clear_csr!(
///
, set_hpm27, clear_hpm27, 1 << 27);
set_clear_csr!(
///
, set_hpm28, clear_hpm28, 1 << 28);
set_clear_csr!(
///
, set_hpm29, clear_hpm29, 1 << 29);
set_clear_csr!(
///
, set_hpm30, clear_hpm30, 1 << 30);
set_clear_csr!(
///
, set_hpm31, clear_hpm31, 1 << 31);
// enums

View File

@@ -1,173 +0,0 @@
//! Hypervisor Exception Delegation Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hedeleg {
    bits: usize,
}
impl Hedeleg {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hedeleg CSR.
    ///
    /// # Safety
    /// Changes which exceptions are delegated to VS-mode. `_write` is
    /// supplied by the surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Instruction address misaligned (bit 0)
    #[inline]
    pub fn ex0(&self) -> bool {
        self.bits.get_bit(0)
    }
    #[inline]
    pub fn set_ex0(&mut self, val: bool) {
        self.bits.set_bit(0, val);
    }
    /// Instruction access fault (bit 1)
    #[inline]
    pub fn ex1(&self) -> bool {
        self.bits.get_bit(1)
    }
    #[inline]
    pub fn set_ex1(&mut self, val: bool) {
        self.bits.set_bit(1, val);
    }
    /// Illegal instruction (bit 2)
    #[inline]
    pub fn ex2(&self) -> bool {
        self.bits.get_bit(2)
    }
    #[inline]
    pub fn set_ex2(&mut self, val: bool) {
        self.bits.set_bit(2, val);
    }
    /// Breakpoint (bit 3)
    #[inline]
    pub fn ex3(&self) -> bool {
        self.bits.get_bit(3)
    }
    #[inline]
    pub fn set_ex3(&mut self, val: bool) {
        self.bits.set_bit(3, val);
    }
    /// Load address misaligned (bit 4)
    #[inline]
    pub fn ex4(&self) -> bool {
        self.bits.get_bit(4)
    }
    #[inline]
    pub fn set_ex4(&mut self, val: bool) {
        self.bits.set_bit(4, val);
    }
    /// Load access fault (bit 5)
    #[inline]
    pub fn ex5(&self) -> bool {
        self.bits.get_bit(5)
    }
    #[inline]
    pub fn set_ex5(&mut self, val: bool) {
        self.bits.set_bit(5, val);
    }
    /// Store/AMO address misaligned (bit 6)
    #[inline]
    pub fn ex6(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_ex6(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// Store/AMO access fault (bit 7)
    #[inline]
    pub fn ex7(&self) -> bool {
        self.bits.get_bit(7)
    }
    #[inline]
    pub fn set_ex7(&mut self, val: bool) {
        self.bits.set_bit(7, val);
    }
    /// Environment call from U-mode or VU-mode (bit 8)
    #[inline]
    pub fn ex8(&self) -> bool {
        self.bits.get_bit(8)
    }
    #[inline]
    pub fn set_ex8(&mut self, val: bool) {
        self.bits.set_bit(8, val);
    }
    /// Instruction page fault (bit 12)
    #[inline]
    pub fn ex12(&self) -> bool {
        self.bits.get_bit(12)
    }
    #[inline]
    pub fn set_ex12(&mut self, val: bool) {
        self.bits.set_bit(12, val);
    }
    /// Load page fault (bit 13)
    #[inline]
    pub fn ex13(&self) -> bool {
        self.bits.get_bit(13)
    }
    #[inline]
    pub fn set_ex13(&mut self, val: bool) {
        self.bits.set_bit(13, val);
    }
    /// Store/AMO page fault (bit 15)
    #[inline]
    pub fn ex15(&self) -> bool {
        self.bits.get_bit(15)
    }
    #[inline]
    pub fn set_ex15(&mut self, val: bool) {
        self.bits.set_bit(15, val);
    }
}
// hedeleg is CSR 0x602 = 1538.
read_csr_as!(Hedeleg, 1538, __read_hedeleg);
write_csr!(1538, __write_hedeleg);
set!(1538, __set_hedeleg);
clear!(1538, __clear_hedeleg);
// bit ops
set_clear_csr!(
///Instruction address misaligned
, set_ex0, clear_ex0, 1 << 0);
set_clear_csr!(
///Instruction access fault
, set_ex1, clear_ex1, 1 << 1);
set_clear_csr!(
///Illegal instruction
, set_ex2, clear_ex2, 1 << 2);
set_clear_csr!(
///Breakpoint
, set_ex3, clear_ex3, 1 << 3);
set_clear_csr!(
///Load address misaligned
, set_ex4, clear_ex4, 1 << 4);
set_clear_csr!(
///Load access fault
, set_ex5, clear_ex5, 1 << 5);
set_clear_csr!(
///Store/AMO address misaligned
, set_ex6, clear_ex6, 1 << 6);
set_clear_csr!(
///Store/AMO access fault
, set_ex7, clear_ex7, 1 << 7);
set_clear_csr!(
///Environment call from U-mode or VU-mode
, set_ex8, clear_ex8, 1 << 8);
set_clear_csr!(
///Instruction page fault
, set_ex12, clear_ex12, 1 << 12);
set_clear_csr!(
///Load page fault
, set_ex13, clear_ex13, 1 << 13);
set_clear_csr!(
///Store/AMO page fault
, set_ex15, clear_ex15, 1 << 15);
// enums

View File

@@ -1,73 +0,0 @@
//! Hypervisor Guest Address Translation and Protection Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hgatp {
    bits: usize,
}
impl Hgatp {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hgatp CSR.
    ///
    /// # Safety
    /// Switches the guest address-translation configuration. `_write` is
    /// supplied by the surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Guest address translation mode (bits 60..64).
    // NOTE(review): bit positions assume a 64-bit usize (RV64) — confirm target.
    #[inline]
    pub fn mode(&self) -> HgatpValues {
        HgatpValues::from(self.bits.get_bits(60..64))
    }
    #[inline]
    pub fn set_mode(&mut self, val: HgatpValues) {
        self.bits.set_bits(60..64, val as usize);
    }
    /// Virtual machine ID (bits 44..58).
    #[inline]
    pub fn vmid(&self) -> usize {
        self.bits.get_bits(44..58)
    }
    #[inline]
    pub fn set_vmid(&mut self, val: usize) {
        self.bits.set_bits(44..58, val);
    }
    /// Physical Page Number of the root guest page table (bits 0..44).
    #[inline]
    pub fn ppn(&self) -> usize {
        self.bits.get_bits(0..44)
    }
    #[inline]
    pub fn set_ppn(&mut self, val: usize) {
        self.bits.set_bits(0..44, val);
    }
}
// hgatp is CSR 0x680 = 1664.
read_csr_as!(Hgatp, 1664, __read_hgatp);
write_csr!(1664, __write_hgatp);
set!(1664, __set_hgatp);
clear!(1664, __clear_hgatp);
// bit ops
// enums
/// Encodings of the hgatp MODE field.
#[derive(Copy, Clone, Debug)]
#[repr(usize)]
pub enum HgatpValues {
    /// No guest address translation.
    Bare = 0,
    /// Sv39x4 guest translation.
    Sv39x4 = 8,
    /// Sv48x4 guest translation.
    Sv48x4 = 9,
}
impl HgatpValues {
    /// Decodes a raw MODE field value; encodings other than 0/8/9 are
    /// treated as impossible.
    fn from(x: usize) -> Self {
        match x {
            9 => HgatpValues::Sv48x4,
            8 => HgatpValues::Sv39x4,
            0 => HgatpValues::Bare,
            _ => unreachable!(),
        }
    }
}

View File

@@ -1,3 +0,0 @@
//! Hypervisor Guest External Interrupt Enable Register.
// hgeie is CSR 0x607 = 1543; exposed as plain usize accessors.
read_csr_as_usize!(1543, __read_hgeie);
write_csr_as_usize!(1543, __write_hgeie);

View File

@@ -1,3 +0,0 @@
//! Hypervisor Guest External Interrupt Pending Register.
// hgeip is CSR 0xE12 = 3602; exposed as plain usize accessors.
// NOTE(review): the spec defines hgeip as read-only; a write accessor is
// generated here anyway — confirm it is intentional before relying on it.
read_csr_as_usize!(3602, __read_hgeip);
write_csr_as_usize!(3602, __write_hgeip);

View File

@@ -1,65 +0,0 @@
//! Hypervisor Interrupt Delegation Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hideleg {
    bits: usize,
}
impl Hideleg {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hideleg CSR.
    ///
    /// # Safety
    /// Changes which interrupts are delegated to VS-mode. `_write` is
    /// supplied by the surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Software Interrupt delegation (bit 2)
    #[inline]
    pub fn sip(&self) -> bool {
        self.bits.get_bit(2)
    }
    #[inline]
    pub fn set_sip(&mut self, val: bool) {
        self.bits.set_bit(2, val);
    }
    /// Timer Interrupt delegation (bit 6)
    #[inline]
    pub fn tip(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_tip(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// External Interrupt delegation (bit 10)
    #[inline]
    pub fn eip(&self) -> bool {
        self.bits.get_bit(10)
    }
    #[inline]
    pub fn set_eip(&mut self, val: bool) {
        self.bits.set_bit(10, val);
    }
}
// hideleg is CSR 0x603 = 1539.
read_csr_as!(Hideleg, 1539, __read_hideleg);
write_csr!(1539, __write_hideleg);
set!(1539, __set_hideleg);
clear!(1539, __clear_hideleg);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_sip, clear_sip, 1 << 2);
set_clear_csr!(
///Timer Interrupt
, set_tip, clear_tip, 1 << 6);
set_clear_csr!(
///External Interrupt
, set_eip, clear_eip, 1 << 10);
// enums

View File

@@ -1,77 +0,0 @@
//! Hypervisor Interrupt Enable Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hie {
    bits: usize,
}
impl Hie {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hie CSR.
    ///
    /// # Safety
    /// Enables/disables hypervisor-level interrupts. `_write` is supplied
    /// by the surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// VS-level Software Interrupt enable (bit 2)
    #[inline]
    pub fn vssie(&self) -> bool {
        self.bits.get_bit(2)
    }
    #[inline]
    pub fn set_vssie(&mut self, val: bool) {
        self.bits.set_bit(2, val);
    }
    /// VS-level Timer Interrupt enable (bit 6)
    #[inline]
    pub fn vstie(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_vstie(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// VS-level External Interrupt enable (bit 10)
    #[inline]
    pub fn vseie(&self) -> bool {
        self.bits.get_bit(10)
    }
    #[inline]
    pub fn set_vseie(&mut self, val: bool) {
        self.bits.set_bit(10, val);
    }
    /// Guest External Interrupt enable (bit 12)
    #[inline]
    pub fn sgeie(&self) -> bool {
        self.bits.get_bit(12)
    }
    #[inline]
    pub fn set_sgeie(&mut self, val: bool) {
        self.bits.set_bit(12, val);
    }
}
// hie is CSR 0x604 = 1540.
read_csr_as!(Hie, 1540, __read_hie);
write_csr!(1540, __write_hie);
set!(1540, __set_hie);
clear!(1540, __clear_hie);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_vssie, clear_vssie, 1 << 2);
set_clear_csr!(
///Timer Interrupt
, set_vstie, clear_vstie, 1 << 6);
set_clear_csr!(
///External Interrupt
, set_vseie, clear_vseie, 1 << 10);
set_clear_csr!(
///Guest External Interrupt
, set_sgeie, clear_sgeie, 1 << 12);
// enums

View File

@@ -1,77 +0,0 @@
//! Hypervisor Interrupt Pending Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hip {
    bits: usize,
}
impl Hip {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hip CSR.
    ///
    /// # Safety
    /// Alters pending-interrupt state. `_write` is supplied by the
    /// surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// VS-level Software Interrupt pending (bit 2)
    #[inline]
    pub fn vssip(&self) -> bool {
        self.bits.get_bit(2)
    }
    #[inline]
    pub fn set_vssip(&mut self, val: bool) {
        self.bits.set_bit(2, val);
    }
    /// VS-level Timer Interrupt pending (bit 6)
    #[inline]
    pub fn vstip(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_vstip(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// VS-level External Interrupt pending (bit 10)
    #[inline]
    pub fn vseip(&self) -> bool {
        self.bits.get_bit(10)
    }
    #[inline]
    pub fn set_vseip(&mut self, val: bool) {
        self.bits.set_bit(10, val);
    }
    /// Guest External Interrupt pending (bit 12)
    #[inline]
    pub fn sgeip(&self) -> bool {
        self.bits.get_bit(12)
    }
    #[inline]
    pub fn set_sgeip(&mut self, val: bool) {
        self.bits.set_bit(12, val);
    }
}
// hip is CSR 0x644 = 1604.
read_csr_as!(Hip, 1604, __read_hip);
write_csr!(1604, __write_hip);
set!(1604, __set_hip);
clear!(1604, __clear_hip);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_vssip, clear_vssip, 1 << 2);
set_clear_csr!(
///Timer Interrupt
, set_vstip, clear_vstip, 1 << 6);
set_clear_csr!(
///External Interrupt
, set_vseip, clear_vseip, 1 << 10);
set_clear_csr!(
///Guest External Interrupt
, set_sgeip, clear_sgeip, 1 << 12);
// enums

View File

@@ -1,160 +0,0 @@
//! HStatus Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hstatus {
    bits: usize,
}
impl Hstatus {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hstatus CSR.
    ///
    /// # Safety
    /// Changes hypervisor execution state. `_write` is supplied by the
    /// surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Effective XLEN for VS-mode (bits 32..34).
    // NOTE(review): bit positions assume a 64-bit usize (RV64) — confirm target.
    #[inline]
    pub fn vsxl(&self) -> VsxlValues {
        VsxlValues::from(self.bits.get_bits(32..34))
    }
    #[inline]
    pub fn set_vsxl(&mut self, val: VsxlValues) {
        self.bits.set_bits(32..34, val as usize);
    }
    /// TSR for VM (bit 22).
    #[inline]
    pub fn vtsr(&self) -> bool {
        self.bits.get_bit(22)
    }
    #[inline]
    pub fn set_vtsr(&mut self, val: bool) {
        self.bits.set_bit(22, val);
    }
    /// TW for VM (bit 21).
    #[inline]
    pub fn vtw(&self) -> bool {
        self.bits.get_bit(21)
    }
    #[inline]
    pub fn set_vtw(&mut self, val: bool) {
        self.bits.set_bit(21, val);
    }
    /// TVM for VM (bit 20).
    #[inline]
    pub fn vtvm(&self) -> bool {
        self.bits.get_bit(20)
    }
    #[inline]
    pub fn set_vtvm(&mut self, val: bool) {
        self.bits.set_bit(20, val);
    }
    /// Virtual Guest External Interrupt Number (bits 12..18).
    #[inline]
    pub fn vgein(&self) -> usize {
        self.bits.get_bits(12..18)
    }
    #[inline]
    pub fn set_vgein(&mut self, val: usize) {
        self.bits.set_bits(12..18, val);
    }
    /// Hypervisor User mode (bit 9).
    #[inline]
    pub fn hu(&self) -> bool {
        self.bits.get_bit(9)
    }
    #[inline]
    pub fn set_hu(&mut self, val: bool) {
        self.bits.set_bit(9, val);
    }
    /// Supervisor Previous Virtual Privilege (bit 8).
    #[inline]
    pub fn spvp(&self) -> bool {
        self.bits.get_bit(8)
    }
    #[inline]
    pub fn set_spvp(&mut self, val: bool) {
        self.bits.set_bit(8, val);
    }
    /// Supervisor Previous Virtualization mode (bit 7).
    #[inline]
    pub fn spv(&self) -> bool {
        self.bits.get_bit(7)
    }
    #[inline]
    pub fn set_spv(&mut self, val: bool) {
        self.bits.set_bit(7, val);
    }
    /// Guest Virtual Address (bit 6).
    #[inline]
    pub fn gva(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_gva(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// VS access endianness (bit 5).
    #[inline]
    pub fn vsbe(&self) -> bool {
        self.bits.get_bit(5)
    }
    #[inline]
    pub fn set_vsbe(&mut self, val: bool) {
        self.bits.set_bit(5, val);
    }
}
// hstatus is CSR 0x600 = 1536.
read_csr_as!(Hstatus, 1536, __read_hstatus);
write_csr!(1536, __write_hstatus);
set!(1536, __set_hstatus);
clear!(1536, __clear_hstatus);
// bit ops
set_clear_csr!(
///TSR for VM.
, set_vtsr, clear_vtsr, 1 << 22);
set_clear_csr!(
///TW for VM.
, set_vtw, clear_vtw, 1 << 21);
set_clear_csr!(
///TVM for VM.
, set_vtvm, clear_vtvm, 1 << 20);
set_clear_csr!(
///Hypervisor User mode.
, set_hu, clear_hu, 1 << 9);
set_clear_csr!(
///Supervisor Previous Virtual Privilege.
, set_spvp, clear_spvp, 1 << 8);
set_clear_csr!(
///Supervisor Previous Virtualization mode.
, set_spv, clear_spv, 1 << 7);
set_clear_csr!(
///Guest Virtual Address.
, set_gva, clear_gva, 1 << 6);
set_clear_csr!(
///VS access endianness.
, set_vsbe, clear_vsbe, 1 << 5);
// enums
/// Encodings of the hstatus VSXL field (effective XLEN for VS-mode).
#[derive(Copy, Clone, Debug)]
#[repr(usize)]
pub enum VsxlValues {
    /// XLEN = 32.
    Vsxl32 = 1,
    /// XLEN = 64.
    Vsxl64 = 2,
    /// XLEN = 128.
    Vsxl128 = 3,
}
impl VsxlValues {
    /// Decodes a raw VSXL field value; encodings other than 1/2/3 are
    /// treated as impossible.
    fn from(x: usize) -> Self {
        match x {
            3 => VsxlValues::Vsxl128,
            2 => VsxlValues::Vsxl64,
            1 => VsxlValues::Vsxl32,
            _ => unreachable!(),
        }
    }
}

View File

@@ -1,4 +0,0 @@
//! Hypervisor Time Delta Register.
// htimedelta is CSR 0x605 = 1541. `read_composite_csr` combines this low
// half with `super::htimedeltah` — presumably the RV32 high half; on RV64
// this register alone holds the full delta. TODO confirm macro semantics.
read_composite_csr!(super::htimedeltah::read(), read());
read_csr_as_usize!(1541, __read_htimedelta);
write_csr_as_usize!(1541, __write_htimedelta);

View File

@@ -1,3 +0,0 @@
//! Hypervisor Time Delta Register (high half).
// htimedeltah is CSR 0x615 = 1557; combined with htimedelta via
// read_composite_csr in the sibling module.
read_csr_as_usize!(1557, __read_htimedeltah);
write_csr_as_usize!(1557, __write_htimedeltah);

View File

@@ -1,3 +0,0 @@
//! Hypervisor Trap Instruction Register.
// htinst is CSR 0x64A = 1610; exposed as plain usize accessors.
read_csr_as_usize!(1610, __read_htinst);
write_csr_as_usize!(1610, __write_htinst);

View File

@@ -1,3 +0,0 @@
//! Hypervisor Trap Value Register.
// htval is CSR 0x643 = 1603; exposed as plain usize accessors.
read_csr_as_usize!(1603, __read_htval);
write_csr_as_usize!(1603, __write_htval);

View File

@@ -1,65 +0,0 @@
//! Hypervisor Virtual Interrupt Pending Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Hvip {
    bits: usize,
}
impl Hvip {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the hvip CSR.
    ///
    /// # Safety
    /// Injects/clears virtual interrupts for the guest. `_write` is
    /// supplied by the surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// VS-level Software Interrupt pending (bit 2)
    #[inline]
    pub fn vssip(&self) -> bool {
        self.bits.get_bit(2)
    }
    #[inline]
    pub fn set_vssip(&mut self, val: bool) {
        self.bits.set_bit(2, val);
    }
    /// VS-level Timer Interrupt pending (bit 6)
    #[inline]
    pub fn vstip(&self) -> bool {
        self.bits.get_bit(6)
    }
    #[inline]
    pub fn set_vstip(&mut self, val: bool) {
        self.bits.set_bit(6, val);
    }
    /// VS-level External Interrupt pending (bit 10)
    #[inline]
    pub fn vseip(&self) -> bool {
        self.bits.get_bit(10)
    }
    #[inline]
    pub fn set_vseip(&mut self, val: bool) {
        self.bits.set_bit(10, val);
    }
}
// hvip is CSR 0x645 = 1605.
read_csr_as!(Hvip, 1605, __read_hvip);
write_csr!(1605, __write_hvip);
set!(1605, __set_hvip);
clear!(1605, __clear_hvip);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_vssip, clear_vssip, 1 << 2);
set_clear_csr!(
///Timer Interrupt
, set_vstip, clear_vstip, 1 << 6);
set_clear_csr!(
///External Interrupt
, set_vseip, clear_vseip, 1 << 10);
// enums

View File

@@ -1,23 +0,0 @@
// Hypervisor (HS-mode) CSR modules.
pub mod hcounteren;
pub mod hedeleg;
pub mod hgatp;
pub mod hgeie;
pub mod hgeip;
pub mod hideleg;
pub mod hie;
pub mod hip;
pub mod hstatus;
pub mod htimedelta;
pub mod htimedeltah;
pub mod htinst;
pub mod htval;
pub mod hvip;
// Virtual-supervisor (VS-mode) CSR modules.
pub mod vsatp;
pub mod vscause;
pub mod vsepc;
pub mod vsie;
pub mod vsip;
pub mod vsscratch;
pub mod vsstatus;
pub mod vstval;
pub mod vstvec;

View File

@@ -1,73 +0,0 @@
//! Virtual Supervisor Guest Address Translation and Protection Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Vsatp {
    bits: usize,
}
impl Vsatp {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the vsatp CSR.
    ///
    /// # Safety
    /// Switches VS-level address translation. `_write` is supplied by the
    /// surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Address translation mode (bits 60..64).
    // NOTE(review): reuses the HgatpValues names although vsatp is
    // satp-format (8/9 are Sv39/Sv48, not the x4 variants) — confirm.
    #[inline]
    pub fn mode(&self) -> HgatpValues {
        HgatpValues::from(self.bits.get_bits(60..64))
    }
    #[inline]
    pub fn set_mode(&mut self, val: HgatpValues) {
        self.bits.set_bits(60..64, val as usize);
    }
    /// ASID (bits 44..60).
    #[inline]
    pub fn asid(&self) -> usize {
        self.bits.get_bits(44..60)
    }
    #[inline]
    pub fn set_asid(&mut self, val: usize) {
        self.bits.set_bits(44..60, val);
    }
    /// Physical Page Number of the root page table (bits 0..44).
    #[inline]
    pub fn ppn(&self) -> usize {
        self.bits.get_bits(0..44)
    }
    #[inline]
    pub fn set_ppn(&mut self, val: usize) {
        self.bits.set_bits(0..44, val);
    }
}
// vsatp is CSR 0x280 = 640.
read_csr_as!(Vsatp, 640, __read_vsatp);
write_csr!(640, __write_vsatp);
set!(640, __set_vsatp);
clear!(640, __clear_vsatp);
// bit ops
// enums
/// Encodings of the vsatp MODE field.
// NOTE(review): names mirror the hgatp module; for the satp format these
// encodings are Sv39/Sv48 rather than the x4 variants — confirm naming.
#[derive(Copy, Clone, Debug)]
#[repr(usize)]
pub enum HgatpValues {
    /// No translation.
    Bare = 0,
    Sv39x4 = 8,
    Sv48x4 = 9,
}
impl HgatpValues {
    /// Decodes a raw MODE field value; encodings other than 0/8/9 are
    /// treated as impossible.
    fn from(x: usize) -> Self {
        match x {
            9 => HgatpValues::Sv48x4,
            8 => HgatpValues::Sv39x4,
            0 => HgatpValues::Bare,
            _ => unreachable!(),
        }
    }
}

View File

@@ -1,50 +0,0 @@
//! Virtual Supervisor Cause Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Vscause {
    bits: usize,
}
impl Vscause {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the vscause CSR.
    ///
    /// # Safety
    /// Overwrites the recorded trap cause. `_write` is supplied by the
    /// surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// True when the cause is an interrupt (bit 63).
    // NOTE(review): bit 63 assumes a 64-bit usize (RV64); `get_bit(63)`
    // would panic on RV32 — confirm target.
    #[inline]
    pub fn interrupt(&self) -> bool {
        self.bits.get_bit(63)
    }
    #[inline]
    pub fn set_interrupt(&mut self, val: bool) {
        self.bits.set_bit(63, val);
    }
    /// Exception code (bits 0..63).
    #[inline]
    pub fn code(&self) -> usize {
        self.bits.get_bits(0..63)
    }
    #[inline]
    pub fn set_code(&mut self, val: usize) {
        self.bits.set_bits(0..63, val);
    }
}
// vscause is CSR 0x242 = 578.
read_csr_as!(Vscause, 578, __read_vscause);
write_csr!(578, __write_vscause);
set!(578, __set_vscause);
clear!(578, __clear_vscause);
// bit ops
set_clear_csr!(
///Is cause interrupt.
, set_interrupt, clear_interrupt, 1 << 63);
// enums

View File

@@ -1,3 +0,0 @@
//! Virtual Supervisor Exception Program Counter.
read_csr_as_usize!(577, __read_vsepc);
write_csr_as_usize!(577, __write_vsepc);

View File

@@ -1,65 +0,0 @@
//! Virtual Supevisor Interrupt Enable Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Vsie {
    bits: usize,
}
impl Vsie {
    /// Raw bits of the cached register value.
    #[inline]
    pub fn bits(&self) -> usize {
        self.bits
    }
    /// Wraps a raw bit pattern; does not access the CSR.
    #[inline]
    pub fn from_bits(x: usize) -> Self {
        Self { bits: x }
    }
    /// Writes the cached value back to the vsie CSR.
    ///
    /// # Safety
    /// Enables/disables VS-level interrupts. `_write` is supplied by the
    /// surrounding CSR macro plumbing (not visible here).
    #[inline]
    pub unsafe fn write(&self) {
        _write(self.bits);
    }
    /// Software Interrupt enable (bit 1)
    #[inline]
    pub fn ssie(&self) -> bool {
        self.bits.get_bit(1)
    }
    #[inline]
    pub fn set_ssie(&mut self, val: bool) {
        self.bits.set_bit(1, val);
    }
    /// Timer Interrupt enable (bit 5)
    #[inline]
    pub fn stie(&self) -> bool {
        self.bits.get_bit(5)
    }
    #[inline]
    pub fn set_stie(&mut self, val: bool) {
        self.bits.set_bit(5, val);
    }
    /// External Interrupt enable (bit 9)
    #[inline]
    pub fn seie(&self) -> bool {
        self.bits.get_bit(9)
    }
    #[inline]
    pub fn set_seie(&mut self, val: bool) {
        self.bits.set_bit(9, val);
    }
}
// vsie is CSR 0x204 = 516.
read_csr_as!(Vsie, 516, __read_vsie);
write_csr!(516, __write_vsie);
set!(516, __set_vsie);
clear!(516, __clear_vsie);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_ssie, clear_ssie, 1 << 1);
set_clear_csr!(
///Timer Interrupt
, set_stie, clear_stie, 1 << 5);
set_clear_csr!(
///External Interrupt
, set_seie, clear_seie, 1 << 9);
// enums

View File

@@ -1,65 +0,0 @@
//! Virtual Supevisor Interrupt Pending Register.
use bit_field::BitField;
#[derive(Copy, Clone, Debug)]
pub struct Vsip {
bits: usize,
}
impl Vsip {
#[inline]
pub fn bits(&self) -> usize {
return self.bits;
}
#[inline]
pub fn from_bits(x: usize) -> Self {
return Vsip { bits: x };
}
#[inline]
pub unsafe fn write(&self) {
_write(self.bits);
}
/// Software Interrupt
#[inline]
pub fn ssip(&self) -> bool {
self.bits.get_bit(1)
}
#[inline]
pub fn set_ssip(&mut self, val: bool) {
self.bits.set_bit(1, val);
}
/// Timer Interrupt
#[inline]
pub fn stip(&self) -> bool {
self.bits.get_bit(5)
}
#[inline]
pub fn set_stip(&mut self, val: bool) {
self.bits.set_bit(5, val);
}
/// External Interrupt
#[inline]
pub fn seip(&self) -> bool {
self.bits.get_bit(9)
}
#[inline]
pub fn set_seip(&mut self, val: bool) {
self.bits.set_bit(9, val);
}
}
read_csr_as!(Vsip, 580, __read_vsip);
write_csr!(580, __write_vsip);
set!(580, __set_vsip);
clear!(580, __clear_vsip);
// bit ops
set_clear_csr!(
///Software Interrupt
, set_ssip, clear_ssip, 1 << 1);
set_clear_csr!(
///Timer Interrupt
, set_stip, clear_stip, 1 << 5);
set_clear_csr!(
///External Interrupt
, set_seip, clear_seip, 1 << 9);
// enums

Some files were not shown because too many files have changed in this diff Show More