jos/kern/trap.c

#include <inc/mmu.h>
#include <inc/x86.h>
#include <inc/assert.h>
#include <kern/pmap.h>
#include <kern/trap.h>
#include <kern/console.h>
#include <kern/monitor.h>
#include <kern/env.h>
#include <kern/syscall.h>
#include <kern/sched.h>
#include <kern/kclock.h>
#include <kern/picirq.h>
#include <kern/cpu.h>
#include <kern/spinlock.h>
static struct Taskstate ts;
/* For debugging, so print_trapframe can distinguish between printing
* a saved trapframe and printing the current trapframe and print some
* additional information in the latter case.
*/
static struct Trapframe *last_tf;
/* Interrupt descriptor table. (Must be built at run time because
* shifted function addresses can't be represented in relocation records.)
*/
struct Gatedesc idt[256] = { { 0 } };
struct Pseudodesc idt_pd = {
sizeof(idt) - 1, (uint32_t) idt
};
static const char *trapname(int trapno)
{
static const char * const excnames[] = {
"Divide error",
"Debug",
"Non-Maskable Interrupt",
"Breakpoint",
"Overflow",
"BOUND Range Exceeded",
"Invalid Opcode",
"Device Not Available",
"Double Fault",
"Coprocessor Segment Overrun",
"Invalid TSS",
"Segment Not Present",
"Stack Fault",
"General Protection",
"Page Fault",
"(unknown trap)",
"x87 FPU Floating-Point Error",
"Alignment Check",
"Machine-Check",
"SIMD Floating-Point Exception"
};
if (trapno < ARRAY_SIZE(excnames))
return excnames[trapno];
if (trapno == T_SYSCALL)
return "System call";
if (trapno >= IRQ_OFFSET && trapno < IRQ_OFFSET + 16)
return "Hardware Interrupt";
return "(unknown trap)";
}
// Trap handler entry points, defined in kern/trapentry.S and declared
// here so that trap_init() can install them in the IDT below.
void t_divide();
void t_debug();
void t_nmi();
void t_brkpt();
void t_oflow();
void t_bound();
void t_illop();
void t_device();
void t_dblflt();
void t_tss();
void t_segnp();
void t_stack();
void t_gpflt();
void t_pgflt();
void t_fperr();
void t_align();
void t_mchk();
void t_simderr();
void t_syscall();
void t_default();
void
trap_init(void)
{
extern struct Segdesc gdt[];
/*
*
* HINT
* Do something like this: SETGATE(idt[T_DIVIDE], 0, GD_KT, t_divide, 0);
* if your trap handler's name for divide by zero is t_divide.
* Additionally, declare each trap handler as a function so that it
* can be referred to from C code (see the declarations above).
*
*/
// LAB 3: Your code here.
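// SETGATE(gate, istrap, sel, off, dpl): istrap is 0 so the processor
// uses an interrupt gate and keeps interrupts disabled in the handler,
// sel/off select the kernel-text entry point, and dpl is the privilege
// level required to raise the trap with "int".  Breakpoint and system
// call use DPL 3 so user code may invoke them directly.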
SETGATE(idt[T_DIVIDE], 0, GD_KT, t_divide, 0);
SETGATE(idt[T_DEBUG], 0, GD_KT, t_debug, 0);
SETGATE(idt[T_NMI], 0, GD_KT, t_nmi, 0);
SETGATE(idt[T_BRKPT], 0, GD_KT, t_brkpt, 3);
SETGATE(idt[T_OFLOW], 0, GD_KT, t_oflow, 0);
SETGATE(idt[T_BOUND], 0, GD_KT, t_bound, 0);
SETGATE(idt[T_ILLOP], 0, GD_KT, t_illop, 0);
SETGATE(idt[T_DEVICE], 0, GD_KT, t_device, 0);
SETGATE(idt[T_DBLFLT], 0, GD_KT, t_dblflt, 0);
SETGATE(idt[T_TSS], 0, GD_KT, t_tss, 0);
SETGATE(idt[T_SEGNP], 0, GD_KT, t_segnp, 0);
SETGATE(idt[T_STACK], 0, GD_KT, t_stack, 0);
SETGATE(idt[T_GPFLT], 0, GD_KT, t_gpflt, 0);
SETGATE(idt[T_PGFLT], 0, GD_KT, t_pgflt, 0);
SETGATE(idt[T_FPERR], 0, GD_KT, t_fperr, 0);
SETGATE(idt[T_ALIGN], 0, GD_KT, t_align, 0);
SETGATE(idt[T_MCHK], 0, GD_KT, t_mchk, 0);
SETGATE(idt[T_SIMDERR], 0, GD_KT, t_simderr, 0);
SETGATE(idt[T_SYSCALL], 0, GD_KT, t_syscall, 3);
SETGATE(idt[T_DEFAULT], 0, GD_KT, t_default, 0);
// Per-CPU setup
trap_init_percpu();
}
// Initialize and load the per-CPU TSS and IDT
void
trap_init_percpu(void)
{
// The example code here sets up the Task State Segment (TSS) and
// the TSS descriptor for CPU 0. But it is incorrect if we are
// running on other CPUs because each CPU has its own kernel stack.
// Fix the code so that it works for all CPUs.
//
// Hints:
// - The macro "thiscpu" always refers to the current CPU's
// struct CpuInfo;
// - The ID of the current CPU is given by cpunum() or
// thiscpu->cpu_id;
// - Use "thiscpu->cpu_ts" as the TSS for the current CPU,
// rather than the global "ts" variable;
// - Use gdt[(GD_TSS0 >> 3) + i] for CPU i's TSS descriptor;
// - You mapped the per-CPU kernel stacks in mem_init_mp()
// - Initialize cpu_ts.ts_iomb to prevent unauthorized environments
// from doing IO (0 is not the correct value!)
//
// ltr sets a 'busy' flag in the TSS selector, so if you
// accidentally load the same TSS on more than one CPU, you'll
// get a triple fault. If you set up an individual CPU's TSS
// wrong, you may not get a fault until you try to return from
// user space on that CPU.
//
// LAB 4: Your code here:
// Setup a TSS so that we get the right stack
// when we trap to the kernel.
thiscpu->cpu_ts.ts_esp0 = KSTACKTOP - thiscpu->cpu_id * (KSTKSIZE + KSTKGAP);
thiscpu->cpu_ts.ts_ss0 = GD_KD;
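// Setting the I/O map base past the end of the TSS leaves no I/O
// permission bitmap, so user environments cannot access I/O ports.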
thiscpu->cpu_ts.ts_iomb = sizeof(struct Taskstate);
// Initialize the TSS slot of the gdt.
gdt[(GD_TSS0 >> 3) + cpunum()] = SEG16(STS_T32A, (uint32_t) (&thiscpu->cpu_ts),
sizeof(struct Taskstate) - 1, 0);
gdt[(GD_TSS0 >> 3) + cpunum()].sd_s = 0;
// Load the TSS selector (like other segment selectors, the
// bottom three bits are special; we leave them 0)
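// Each TSS descriptor is 8 bytes, so CPU i uses selector GD_TSS0 + 8*i.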
ltr(GD_TSS0 + (cpunum() << 3));
// Load the IDT
lidt(&idt_pd);
}
void
print_trapframe(struct Trapframe *tf)
{
cprintf("TRAP frame at %p from CPU %d\n", tf, cpunum());
print_regs(&tf->tf_regs);
cprintf(" es 0x----%04x\n", tf->tf_es);
cprintf(" ds 0x----%04x\n", tf->tf_ds);
cprintf(" trap 0x%08x %s\n", tf->tf_trapno, trapname(tf->tf_trapno));
// If this trap was a page fault that just happened
// (so %cr2 is meaningful), print the faulting linear address.
if (tf == last_tf && tf->tf_trapno == T_PGFLT)
cprintf(" cr2 0x%08x\n", rcr2());
cprintf(" err 0x%08x", tf->tf_err);
// For page faults, print decoded fault error code:
// U/K=fault occurred in user/kernel mode
// W/R=a write/read caused the fault
// PR=a protection violation caused the fault (NP=page not present).
if (tf->tf_trapno == T_PGFLT)
cprintf(" [%s, %s, %s]\n",
tf->tf_err & 4 ? "user" : "kernel",
tf->tf_err & 2 ? "write" : "read",
tf->tf_err & 1 ? "protection" : "not-present");
else
cprintf("\n");
cprintf(" eip 0x%08x\n", tf->tf_eip);
cprintf(" cs 0x----%04x\n", tf->tf_cs);
cprintf(" flag 0x%08x\n", tf->tf_eflags);
if ((tf->tf_cs & 3) != 0) {
cprintf(" esp 0x%08x\n", tf->tf_esp);
cprintf(" ss 0x----%04x\n", tf->tf_ss);
}
}
void
print_regs(struct PushRegs *regs)
{
cprintf(" edi 0x%08x\n", regs->reg_edi);
cprintf(" esi 0x%08x\n", regs->reg_esi);
cprintf(" ebp 0x%08x\n", regs->reg_ebp);
cprintf(" oesp 0x%08x\n", regs->reg_oesp);
cprintf(" ebx 0x%08x\n", regs->reg_ebx);
cprintf(" edx 0x%08x\n", regs->reg_edx);
cprintf(" ecx 0x%08x\n", regs->reg_ecx);
cprintf(" eax 0x%08x\n", regs->reg_eax);
}
static void
trap_dispatch(struct Trapframe *tf)
{
// Handle processor exceptions.
// LAB 3: Your code here.
if (tf->tf_trapno == T_PGFLT) {
page_fault_handler(tf);
return;
} else if (tf->tf_trapno == T_BRKPT || tf->tf_trapno == T_DEBUG) {
monitor(tf);
return;
} else if (tf->tf_trapno == T_SYSCALL) {
int32_t returned = syscall(tf->tf_regs.reg_eax,
tf->tf_regs.reg_edx,
tf->tf_regs.reg_ecx,
tf->tf_regs.reg_ebx,
tf->tf_regs.reg_edi,
tf->tf_regs.reg_esi);
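// The result is returned to user space in %eax when the trapframe
// is restored.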
tf->tf_regs.reg_eax = returned;
return;
}
// Handle spurious interrupts
// The hardware sometimes raises these because of noise on the
// IRQ line or other reasons. We don't care.
if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
cprintf("Spurious interrupt on irq 7\n");
print_trapframe(tf);
return;
}
// Handle clock interrupts. Don't forget to acknowledge the
// interrupt using lapic_eoi() before calling the scheduler!
// LAB 4: Your code here.
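// Not filled in above; a standard approach (assuming lapic_eoi() from
// kern/cpu.h and sched_yield() from kern/sched.h) is:
if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) {
lapic_eoi();	// acknowledge the interrupt at the local APIC
sched_yield();	// pick another runnable environment; does not return
}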
// Unexpected trap: The user process or the kernel has a bug.
print_trapframe(tf);
if (tf->tf_cs == GD_KT)
panic("unhandled trap in kernel");
else {
env_destroy(curenv);
return;
}
}
void
trap(struct Trapframe *tf)
{
// The environment may have set DF and some versions
// of GCC rely on DF being clear
asm volatile("cld" ::: "cc");
// Halt the CPU if some other CPU has called panic()
extern char *panicstr;
if (panicstr)
asm volatile("hlt");
// Re-acquire the big kernel lock if we were halted in
// sched_yield()
if (xchg(&thiscpu->cpu_status, CPU_STARTED) == CPU_HALTED)
lock_kernel();
// Check that interrupts are disabled. If this assertion
// fails, DO NOT be tempted to fix it by inserting a "cli" in
// the interrupt path.
assert(!(read_eflags() & FL_IF));
if ((tf->tf_cs & 3) == 3) {
// Trapped from user mode.
// Acquire the big kernel lock before doing any
// serious kernel work.
// LAB 4: Your code here.
lock_kernel();
assert(curenv);
// Garbage collect if the current environment is a zombie
if (curenv->env_status == ENV_DYING) {
env_free(curenv);
curenv = NULL;
sched_yield();
}
// Copy trap frame (which is currently on the stack)
// into 'curenv->env_tf', so that running the environment
// will restart at the trap point.
curenv->env_tf = *tf;
// The trapframe on the stack should be ignored from here on.
tf = &curenv->env_tf;
}
// Record that tf is the last real trapframe so
// print_trapframe can print some additional information.
last_tf = tf;
// Dispatch based on what type of trap occurred
trap_dispatch(tf);
// If we made it to this point, then no other environment was
// scheduled, so we should return to the current environment
// if doing so makes sense.
if (curenv && curenv->env_status == ENV_RUNNING)
env_run(curenv);
else
sched_yield();
}
void
page_fault_handler(struct Trapframe *tf)
{
uint32_t fault_va;
// Read processor's CR2 register to find the faulting address
fault_va = rcr2();
// Handle kernel-mode page faults.
// LAB 3: Your code here.
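// Not filled in above; the usual check is to panic on a page fault
// taken in kernel mode (low two bits of the saved %cs are 0):
if ((tf->tf_cs & 3) == 0)
panic("kernel-mode page fault at va %08x, eip %08x", fault_va, tf->tf_eip);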
// We've already handled kernel-mode exceptions, so if we get here,
// the page fault happened in user mode.
// Call the environment's page fault upcall, if one exists. Set up a
// page fault stack frame on the user exception stack (below
// UXSTACKTOP), then branch to curenv->env_pgfault_upcall.
//
// The page fault upcall might cause another page fault, in which case
// we branch to the page fault upcall recursively, pushing another
// page fault stack frame on top of the user exception stack.
//
// It is convenient for our code which returns from a page fault
// (lib/pfentry.S) to have one word of scratch space at the top of the
// trap-time stack; it allows us to more easily restore the eip/esp. In
// the non-recursive case, we don't have to worry about this because
// the top of the regular user stack is free. In the recursive case,
// this means we have to leave an extra word between the current top of
// the exception stack and the new stack frame because the exception
// stack _is_ the trap-time stack.
//
// If there's no page fault upcall, the environment didn't allocate a
// page for its exception stack or can't write to it, or the exception
// stack overflows, then destroy the environment that caused the fault.
// Note that the grade script assumes you will first check for the page
// fault upcall and print the "user fault va" message below if there is
// none. The remaining three checks can be combined into a single test.
//
// Hints:
// user_mem_assert() and env_run() are useful here.
// To change what the user environment runs, modify 'curenv->env_tf'
// (the 'tf' variable points at 'curenv->env_tf').
// LAB 4: Your code here.
if(!curenv->env_pgfault_upcall) {
// Destroy the environment that caused the fault.
cprintf("[%08x] user fault va %08x ip %08x\n",
curenv->env_id, fault_va, tf->tf_eip);
print_trapframe(tf);
env_destroy(curenv);
}
user_mem_assert(curenv, curenv->env_pgfault_upcall, 1, PTE_U | PTE_P);
user_mem_assert(curenv, (void*) UXSTACKTOP - 1, 1, PTE_U | PTE_P | PTE_W);
uintptr_t top_addr = UXSTACKTOP;
if (tf->tf_esp < UXSTACKTOP && tf->tf_esp >= UXSTACKTOP - PGSIZE) {
// Recursive fault: the exception stack is already in use, so leave
// one word of scratch space below the trap-time esp.
top_addr = tf->tf_esp - 4;
}
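// Build the UTrapframe that lib/pfentry.S expects to find on the
// exception stack.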
struct UTrapframe utf;
utf.utf_eflags = tf->tf_eflags;
utf.utf_eip = tf->tf_eip;
utf.utf_err = tf->tf_err;
utf.utf_esp = tf->tf_esp;
utf.utf_fault_va = fault_va;
utf.utf_regs = tf->tf_regs;
struct UTrapframe* push_to = (struct UTrapframe*) top_addr - 1;
if ((uintptr_t) push_to < UXSTACKTOP - PGSIZE) {
cprintf("[%08x] stack overflow in page fault handler\n",
curenv->env_id);
env_destroy(curenv);
}
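// Write the frame onto the exception stack and redirect the environment
// to its page fault upcall; env_run() resumes execution there.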
*push_to = utf;
curenv->env_tf.tf_eip = (uintptr_t) curenv->env_pgfault_upcall;
curenv->env_tf.tf_esp = (uintptr_t) push_to;
env_run(curenv);
}