Compare commits
13 Commits
Author | SHA1 | Date | |
---|---|---|---|
9bec3d83bd | |||
0289ec3b3e | |||
46cc5c9478 | |||
c07415cffc | |||
e1d239139a | |||
e484704ce8 | |||
60ee3619af | |||
ca51596893 | |||
d7a96eb60f | |||
|
187496eb19 | ||
|
c56b8ebcbd | ||
|
815502c0cc | ||
|
2d1187aa3c |
|
@ -67,6 +67,7 @@ endif
|
|||
GDBPORT := $(shell expr `id -u` % 5000 + 25000)
|
||||
|
||||
CC := $(GCCPREFIX)gcc -pipe
|
||||
GDB := $(GCCPREFIX)gdb
|
||||
AS := $(GCCPREFIX)as
|
||||
AR := $(GCCPREFIX)ar
|
||||
LD := $(GCCPREFIX)ld
|
||||
|
@ -148,7 +149,7 @@ QEMUOPTS += $(QEMUEXTRA)
|
|||
sed "s/localhost:1234/localhost:$(GDBPORT)/" < $^ > $@
|
||||
|
||||
gdb:
|
||||
gdb -n -x .gdbinit
|
||||
$(GDB) -n -x .gdbinit
|
||||
|
||||
pre-qemu: .gdbinit
|
||||
|
||||
|
|
22
answers-lab2.txt
Normal file
22
answers-lab2.txt
Normal file
|
@ -0,0 +1,22 @@
|
|||
1. The entry given by PDX(UVPT) is mapped to the page directory itself,
|
||||
to allow programs to read existing memory mappings. The entry at PDX(UPAGES)
|
||||
is mapped to the pages variable in memory so that the kernel (and potentially other ring 0 programs) can access it. The entry pointed to by PDX(KSTACKTOP-KSTACKSIZE) is mapped
|
||||
to the bootstack location. Finally, both the memory pointed to by 0 and PDX(KERNBASE) are mapped to kernel memory. However, the mappings at VA 0 are read-only, so user programs can't touch them, while the mappings at PDX(KERNBASE) are kernel-private and RW.
|
||||
|
||||
A table could be as follows:
|
||||
|
||||
400 - Kernel memory
|
||||
... - Kernel memory
|
||||
3c0 - Kernel memory
|
||||
3bf - Kernel Stack
|
||||
3bd - UVPT
|
||||
3bc - UPAGES
|
||||
|
||||
|
||||
2. The kernel memory is mapped as kernel read write. This means there are several flags set in the kernel page directory and page table entries, which indicate that this is restricted memory. The lower 3 bits of the CS register will be checked when an access to this memory is made, and, if they are not 0 or 1 (indicating kernel code), the CPU generates a fault, and the user program gives up control back to the OS. Thus, unless a program is started in ring 0, it will not be able to read or write kernel memory, as it should.
|
||||
|
||||
3. The absolute maximum is 4GB, since we use 32-bit integers for referencing memory. Some of this memory is used for the kernel itself, as well as for the page directory and tables.
|
||||
|
||||
4. The page directory and the corresponding pages are all 4kb. The page directory can have 1024 entries, and each of these points to a page table. Thus, we use approximately 4MB (slightly more than that, actually, due to the size of the page directory itself) of memory. Additionally, the "pages" structs (which are used to keep track of available physical pages), will require ~1000000 entries, each of which is between 6 and 8 bytes (depending on whether GCC aligns struct sizes). This means another 8MB is used to keep track of free pages, to a total of around 12MB.
|
||||
|
||||
5. We switch to high EIP when we jump to "relocated". Relocated is a label, and a symbol that's inserted by the linker. Since the linker is configured to link the kernel high, relocated points to the upper portion of memory, where KERNBASE is. However, the entry page directory, just like our full page directory later on, sets up two mappings, one starting at 0 (creating a one to one mapping between some of the virtual addresses and their physical counterparts), and one starting at KERNBASE. Thus, we can continue to run at a low EIP. The only reason I can think of as to why we NEED to make the switch, besides the elementary "the kernel links high", is that we need to be able to write to various symbols, also linked above KERNBASE.
|
|
@ -1,2 +1,2 @@
|
|||
LAB=1
|
||||
PACKAGEDATE=Thu Aug 30 15:16:04 EDT 2018
|
||||
LAB=2
|
||||
PACKAGEDATE=Wed Sep 12 14:51:29 EDT 2018
|
||||
|
|
28
grade-lab2
Executable file
28
grade-lab2
Executable file
|
@ -0,0 +1,28 @@
|
|||
#!/usr/bin/env python

from gradelib import *

# Boot JOS under QEMU, saving console output to jos.out, and stop once
# the kernel monitor reaches its readline prompt.
r = Runner(save("jos.out"),
           stop_breakpoint("readline"))

@test(0, "running JOS")
def test_jos():
    r.run_qemu()

@test(20, "Physical page allocator", parent=test_jos)
def test_check_page_alloc():
    r.match(r"check_page_alloc\(\) succeeded!")

@test(20, "Page management", parent=test_jos)
def test_check_page():
    r.match(r"check_page\(\) succeeded!")

@test(20, "Kernel page directory", parent=test_jos)
def test_check_kern_pgdir():
    r.match(r"check_kern_pgdir\(\) succeeded!")

@test(10, "Page management 2", parent=test_jos)
def test_check_page_installed_pgdir():
    r.match(r"check_page_installed_pgdir\(\) succeeded!")

run_tests()
|
|
@ -143,5 +143,46 @@
|
|||
typedef uint32_t pte_t;
|
||||
typedef uint32_t pde_t;
|
||||
|
||||
#if JOS_USER
|
||||
/*
|
||||
* The page directory entry corresponding to the virtual address range
|
||||
* [UVPT, UVPT + PTSIZE) points to the page directory itself. Thus, the page
|
||||
* directory is treated as a page table as well as a page directory.
|
||||
*
|
||||
* One result of treating the page directory as a page table is that all PTEs
|
||||
* can be accessed through a "virtual page table" at virtual address UVPT (to
|
||||
* which uvpt is set in lib/entry.S). The PTE for page number N is stored in
|
||||
* uvpt[N]. (It's worth drawing a diagram of this!)
|
||||
*
|
||||
* A second consequence is that the contents of the current page directory
|
||||
* will always be available at virtual address (UVPT + (UVPT >> PGSHIFT)), to
|
||||
* which uvpd is set in lib/entry.S.
|
||||
*/
|
||||
extern volatile pte_t uvpt[]; // VA of "virtual page table"
|
||||
extern volatile pde_t uvpd[]; // VA of current page directory
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Page descriptor structures, mapped at UPAGES.
|
||||
* Read/write to the kernel, read-only to user programs.
|
||||
*
|
||||
* Each struct PageInfo stores metadata for one physical page.
|
||||
 * It is NOT the physical page itself, but there is a one-to-one
|
||||
* correspondence between physical pages and struct PageInfo's.
|
||||
* You can map a struct PageInfo * to the corresponding physical address
|
||||
* with page2pa() in kern/pmap.h.
|
||||
*/
|
||||
struct PageInfo {
|
||||
// Next page on the free list.
|
||||
struct PageInfo *pp_link;
|
||||
|
||||
// pp_ref is the count of pointers (usually in page table entries)
|
||||
// to this page, for pages allocated using page_alloc.
|
||||
// Pages allocated at boot time using pmap.c's
|
||||
// boot_alloc do not have valid reference count fields.
|
||||
|
||||
uint16_t pp_ref;
|
||||
};
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
#endif /* !JOS_INC_MEMLAYOUT_H */
|
||||
|
|
16
kern/ansi.h
16
kern/ansi.h
|
@ -3,6 +3,22 @@
|
|||
|
||||
#include <inc/types.h>
|
||||
|
||||
#define ACOL_WRAP(s) "\33[" s "m"
|
||||
#define ACOL_BLACK ACOL_WRAP("30")
|
||||
#define ACOL_RED ACOL_WRAP("31")
|
||||
#define ACOL_GREEN ACOL_WRAP("32")
|
||||
#define ACOL_YELLOW ACOL_WRAP("33")
|
||||
#define ACOL_BLUE ACOL_WRAP("34")
|
||||
#define ACOL_MAGENTA ACOL_WRAP("35")
|
||||
#define ACOL_CYAN ACOL_WRAP("36")
|
||||
#define ACOL_WHITE ACOL_WRAP("37")
|
||||
#define ACOL_CLEAR ACOL_WRAP("0")
|
||||
|
||||
#define ACOL_TAG(c, s) "[" c s ACOL_CLEAR "] "
|
||||
#define ACOL_NOTE(s) ACOL_TAG(ACOL_WHITE, " Note ") s
|
||||
#define ACOL_WARN(s) ACOL_TAG(ACOL_YELLOW, "Warning") s
|
||||
#define ACOL_ERR(s) ACOL_TAG(ACOL_RED, " Error ") s
|
||||
|
||||
struct AttrState {
|
||||
uint8_t cattrs;
|
||||
uint8_t attrs;
|
||||
|
|
17
kern/init.c
17
kern/init.c
|
@ -6,18 +6,9 @@
|
|||
|
||||
#include <kern/monitor.h>
|
||||
#include <kern/console.h>
|
||||
#include <kern/pmap.h>
|
||||
#include <kern/kclock.h>
|
||||
|
||||
// Exercise mon_backtrace() (lab 1 only): recurse 'x' levels deep, then
// take a backtrace from the innermost frame so every level shows up.
void
test_backtrace(int x)
{
	cprintf("entering test_backtrace %d\n", x);
	if (x <= 0)
		mon_backtrace(0, 0, 0);
	else
		test_backtrace(x - 1);
	cprintf("leaving test_backtrace %d\n", x);
}
|
||||
|
||||
void
|
||||
i386_init(void)
|
||||
|
@ -35,8 +26,8 @@ i386_init(void)
|
|||
|
||||
cprintf("444544 decimal is %o octal!\n", 444544);
|
||||
|
||||
// Test the stack backtrace function (lab 1 only)
|
||||
test_backtrace(5);
|
||||
// Lab 2 memory management initialization functions
|
||||
mem_init();
|
||||
|
||||
// Test ANSI color escape codes
|
||||
cprintf("\33[31m" "C"
|
||||
|
|
22
kern/kclock.c
Normal file
22
kern/kclock.c
Normal file
|
@ -0,0 +1,22 @@
|
|||
/* See COPYRIGHT for copyright information. */
|
||||
|
||||
/* Support for reading the NVRAM from the real-time clock. */
|
||||
|
||||
#include <inc/x86.h>
|
||||
|
||||
#include <kern/kclock.h>
|
||||
|
||||
|
||||
unsigned
|
||||
mc146818_read(unsigned reg)
|
||||
{
|
||||
outb(IO_RTC, reg);
|
||||
return inb(IO_RTC+1);
|
||||
}
|
||||
|
||||
void
|
||||
mc146818_write(unsigned reg, unsigned datum)
|
||||
{
|
||||
outb(IO_RTC, reg);
|
||||
outb(IO_RTC+1, datum);
|
||||
}
|
29
kern/kclock.h
Normal file
29
kern/kclock.h
Normal file
|
@ -0,0 +1,29 @@
|
|||
/* See COPYRIGHT for copyright information. */

#ifndef JOS_KERN_KCLOCK_H
#define JOS_KERN_KCLOCK_H
#ifndef JOS_KERNEL
# error "This is a JOS kernel header; user programs should not #include it"
#endif

#define IO_RTC		0x070		/* RTC port */

#define MC_NVRAM_START	0xe	/* start of NVRAM: offset 14 */
#define MC_NVRAM_SIZE	50	/* 50 bytes of NVRAM */

/* NVRAM bytes 7 & 8: base memory size */
#define NVRAM_BASELO	(MC_NVRAM_START + 7)	/* low byte; RTC off. 0x15 */
#define NVRAM_BASEHI	(MC_NVRAM_START + 8)	/* high byte; RTC off. 0x16 */

/* NVRAM bytes 9 & 10: extended memory size (between 1MB and 16MB) */
#define NVRAM_EXTLO	(MC_NVRAM_START + 9)	/* low byte; RTC off. 0x17 */
#define NVRAM_EXTHI	(MC_NVRAM_START + 10)	/* high byte; RTC off. 0x18 */

/* NVRAM bytes 38 and 39: extended memory size (between 16MB and 4G) */
#define NVRAM_EXT16LO	(MC_NVRAM_START + 38)	/* low byte; RTC off. 0x34 */
#define NVRAM_EXT16HI	(MC_NVRAM_START + 39)	/* high byte; RTC off. 0x35 */

/* Accessors for the MC146818 real-time clock's NVRAM (kern/kclock.c). */
unsigned mc146818_read(unsigned reg);
void mc146818_write(unsigned reg, unsigned datum);

#endif	// !JOS_KERN_KCLOCK_H
|
104
kern/monitor.c
104
kern/monitor.c
|
@ -7,9 +7,13 @@
|
|||
#include <inc/assert.h>
|
||||
#include <inc/x86.h>
|
||||
|
||||
#include <kern/ansi.h>
|
||||
#include <kern/console.h>
|
||||
#include <kern/monitor.h>
|
||||
#include <kern/kdebug.h>
|
||||
#include <kern/pmap.h>
|
||||
|
||||
#include <inc/string.h>
|
||||
|
||||
#define CMDBUF_SIZE 80 // enough for one VGA text line
|
||||
|
||||
|
@ -25,7 +29,10 @@ struct Command {
|
|||
static struct Command commands[] = {
|
||||
{ "help", "Display this list of commands", mon_help },
|
||||
{ "kerninfo", "Display information about the kernel", mon_kerninfo },
|
||||
{ "backtrace", "Display current backtrace", mon_backtrace }
|
||||
{ "backtrace", "Display current backtrace", mon_backtrace },
|
||||
{ "showmappings", "Display the physical mappings for range", mon_showmappings },
|
||||
{ "mperms", "Change the permissions of a memory range", mon_mperms }
|
||||
|
||||
};
|
||||
|
||||
/***** Implementations of basic kernel monitor commands *****/
|
||||
|
@ -84,7 +91,102 @@ mon_backtrace(int argc, char **argv, struct Trapframe *tf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
// Bail out of a monitor command with an error unless exactly 'n' user
// arguments (argv[1..n]) were supplied.  Wrapped in do { } while (0)
// so the macro behaves as a single statement (safe after if/else) and
// with its arguments parenthesized against expansion surprises.
#define EXPECT_ARGS(n, ac) do { \
		if ((ac) - 1 != (n)) { \
			cprintf(ACOL_ERR("Expected %d arguments, " \
			                 "got %d\n"), (n), (ac) - 1); \
			return 1; \
		} \
	} while (0)

// Colored labels used when printing address mappings.
#define VA_TXT ACOL_CYAN "VA" ACOL_CLEAR
#define PA_TXT ACOL_GREEN "PA" ACOL_CLEAR
|
||||
|
||||
char*
|
||||
decode_pte_perms(pte_t pte, char* buffer) {
|
||||
buffer[0] = (pte & PTE_W) ? 'w' : 'r';
|
||||
buffer[1] = (pte & PTE_U) ? 'u' : 'k';
|
||||
return buffer;
|
||||
}
|
||||
|
||||
void
|
||||
get_pagebounds(char* one, char* two, uintptr_t* pone, uintptr_t* ptwo) {
|
||||
long from = strtol(one, NULL, 0);
|
||||
long to = strtol(two, NULL, 0);
|
||||
*pone = ROUNDDOWN(from, PGSIZE);
|
||||
*ptwo = ROUNDUP(to, PGSIZE);
|
||||
if(*pone != from) cprintf(ACOL_WARN("Aligning start address %p down to %p\n"),
|
||||
from, *pone);
|
||||
if(*ptwo != to) cprintf(ACOL_WARN("Aligning end address %p up to %p\n"),
|
||||
to, *ptwo);
|
||||
}
|
||||
|
||||
int
|
||||
mon_showmappings(int argc, char** argv, struct Trapframe* tf) {
|
||||
EXPECT_ARGS(2, argc);
|
||||
uintptr_t va_start, va_end;
|
||||
char buffer[3] = {0};
|
||||
|
||||
get_pagebounds(argv[1], argv[2], &va_start, &va_end);
|
||||
|
||||
if(va_start == va_end) va_end += PGSIZE;
|
||||
while(va_start < va_end) {
|
||||
pte_t* pte = pgdir_walk(kern_pgdir, (const void*) va_start, 0);
|
||||
cprintf(VA_TXT " 0x%08p -> ", va_start);
|
||||
|
||||
if(pte && (*pte & PTE_P)) {
|
||||
cprintf(PA_TXT " 0x%08p (%s)\n", PTE_ADDR(*pte),
|
||||
decode_pte_perms(*pte, buffer));
|
||||
} else {
|
||||
cprintf(PA_TXT " ------------\n");
|
||||
}
|
||||
|
||||
va_start += PGSIZE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mon_mperms(int argc, char** argv, struct Trapframe* tf) {
|
||||
EXPECT_ARGS(3, argc);
|
||||
pte_t perms = 0;
|
||||
enum {
|
||||
PERM_ADD,
|
||||
PERM_REMOVE,
|
||||
PERM_SET
|
||||
} pmode;
|
||||
|
||||
const char* str = argv[1];
|
||||
if(str[0] == '+') { pmode = PERM_ADD; str++; }
|
||||
else if(str[0] == '-') { pmode = PERM_REMOVE; str++; }
|
||||
else pmode = PERM_SET;
|
||||
|
||||
while(*str) {
|
||||
if(*str == 'w') perms |= PTE_W;
|
||||
else if(*str == 'u') perms |= PTE_U;
|
||||
else {
|
||||
cprintf(ACOL_ERR("Unknown permission character %c\n"), *str);
|
||||
return 1;
|
||||
}
|
||||
str++;
|
||||
}
|
||||
|
||||
uintptr_t va_start, va_end;
|
||||
get_pagebounds(argv[2], argv[3], &va_start, &va_end);
|
||||
if(va_start == va_end) va_end += PGSIZE;
|
||||
while(va_start < va_end) {
|
||||
pte_t* pte = pgdir_walk(kern_pgdir, (void*) va_start, 0);
|
||||
if(!pte || !(*pte & PTE_P)) { va_start += PGSIZE; continue; }
|
||||
|
||||
if(pmode == PERM_ADD) {
|
||||
*pte |= perms;
|
||||
} else if(pmode == PERM_REMOVE) {
|
||||
*pte &= ~perms;
|
||||
} else if(pmode == PERM_SET) {
|
||||
*pte = PTE_ADDR(*pte) | perms | PTE_P;
|
||||
}
|
||||
va_start += PGSIZE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/***** Kernel monitor command interpreter *****/
|
||||
|
||||
|
|
|
@ -15,5 +15,7 @@ void monitor(struct Trapframe *tf);
|
|||
int mon_help(int argc, char **argv, struct Trapframe *tf);
|
||||
int mon_kerninfo(int argc, char **argv, struct Trapframe *tf);
|
||||
int mon_backtrace(int argc, char **argv, struct Trapframe *tf);
|
||||
int mon_showmappings(int argc, char **argv, struct Trapframe *tf);
|
||||
int mon_mperms(int argc, char** argv, struct Trapframe* tf);
|
||||
|
||||
#endif // !JOS_KERN_MONITOR_H
|
||||
|
|
921
kern/pmap.c
Normal file
921
kern/pmap.c
Normal file
|
@ -0,0 +1,921 @@
|
|||
/* See COPYRIGHT for copyright information. */
|
||||
|
||||
#include <inc/x86.h>
|
||||
#include <inc/mmu.h>
|
||||
#include <inc/error.h>
|
||||
#include <inc/string.h>
|
||||
#include <inc/assert.h>
|
||||
|
||||
#include <kern/pmap.h>
|
||||
#include <kern/kclock.h>
|
||||
|
||||
// These variables are set by i386_detect_memory()
|
||||
size_t npages; // Amount of physical memory (in pages)
|
||||
static size_t npages_basemem; // Amount of base memory (in pages)
|
||||
|
||||
// These variables are set in mem_init()
|
||||
pde_t *kern_pgdir; // Kernel's initial page directory
|
||||
struct PageInfo *pages; // Physical page state array
|
||||
static struct PageInfo *page_free_list; // Free list of physical pages
|
||||
|
||||
|
||||
// --------------------------------------------------------------
|
||||
// Detect machine's physical memory setup.
|
||||
// --------------------------------------------------------------
|
||||
|
||||
// Read a 16-bit little-endian quantity from two consecutive CMOS/NVRAM
// registers starting at 'r'.
static int
nvram_read(int r)
{
	int lo = mc146818_read(r);
	int hi = mc146818_read(r + 1);
	return lo | (hi << 8);
}
|
||||
|
||||
static void
|
||||
i386_detect_memory(void)
|
||||
{
|
||||
size_t basemem, extmem, ext16mem, totalmem;
|
||||
|
||||
// Use CMOS calls to measure available base & extended memory.
|
||||
// (CMOS calls return results in kilobytes.)
|
||||
basemem = nvram_read(NVRAM_BASELO);
|
||||
extmem = nvram_read(NVRAM_EXTLO);
|
||||
ext16mem = nvram_read(NVRAM_EXT16LO) * 64;
|
||||
|
||||
// Calculate the number of physical pages available in both base
|
||||
// and extended memory.
|
||||
if (ext16mem)
|
||||
totalmem = 16 * 1024 + ext16mem;
|
||||
else if (extmem)
|
||||
totalmem = 1 * 1024 + extmem;
|
||||
else
|
||||
totalmem = basemem;
|
||||
|
||||
npages = totalmem / (PGSIZE / 1024);
|
||||
npages_basemem = basemem / (PGSIZE / 1024);
|
||||
|
||||
cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
|
||||
totalmem, basemem, totalmem - basemem);
|
||||
}
|
||||
|
||||
|
||||
// --------------------------------------------------------------
|
||||
// Set up memory mappings above UTOP.
|
||||
// --------------------------------------------------------------
|
||||
|
||||
static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
|
||||
static void check_page_free_list(bool only_low_memory);
|
||||
static void check_page_alloc(void);
|
||||
static void check_kern_pgdir(void);
|
||||
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
|
||||
static void check_page(void);
|
||||
static void check_page_installed_pgdir(void);
|
||||
|
||||
// This simple physical memory allocator is used only while JOS is setting
|
||||
// up its virtual memory system. page_alloc() is the real allocator.
|
||||
//
|
||||
// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
|
||||
// bytes. Doesn't initialize the memory. Returns a kernel virtual address.
|
||||
//
|
||||
// If n==0, returns the address of the next free page without allocating
|
||||
// anything.
|
||||
//
|
||||
// If we're out of memory, boot_alloc should panic.
|
||||
// This function may ONLY be used during initialization,
|
||||
// before the page_free_list list has been set up.
|
||||
static void *
|
||||
boot_alloc(uint32_t n)
|
||||
{
|
||||
static char *nextfree; // virtual address of next byte of free memory
|
||||
char *result;
|
||||
|
||||
// Initialize nextfree if this is the first time.
|
||||
// 'end' is a magic symbol automatically generated by the linker,
|
||||
// which points to the end of the kernel's bss segment:
|
||||
// the first virtual address that the linker did *not* assign
|
||||
// to any kernel code or global variables.
|
||||
if (!nextfree) {
|
||||
extern char end[];
|
||||
nextfree = ROUNDUP((char *) end, PGSIZE);
|
||||
}
|
||||
|
||||
// Allocate a chunk large enough to hold 'n' bytes, then update
|
||||
// nextfree. Make sure nextfree is kept aligned
|
||||
// to a multiple of PGSIZE.
|
||||
//
|
||||
// LAB 2: Your code here.
|
||||
result = nextfree;
|
||||
nextfree = ROUNDUP(nextfree + n, PGSIZE);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Set up a two-level page table:
|
||||
// kern_pgdir is its linear (virtual) address of the root
|
||||
//
|
||||
// This function only sets up the kernel part of the address space
|
||||
// (ie. addresses >= UTOP). The user part of the address space
|
||||
// will be set up later.
|
||||
//
|
||||
// From UTOP to ULIM, the user is allowed to read but not write.
|
||||
// Above ULIM the user cannot read or write.
|
||||
void
|
||||
mem_init(void)
|
||||
{
|
||||
uint32_t cr0;
|
||||
size_t n;
|
||||
|
||||
// Find out how much memory the machine has (npages & npages_basemem).
|
||||
i386_detect_memory();
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// create initial page directory.
|
||||
kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
|
||||
memset(kern_pgdir, 0, PGSIZE);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Recursively insert PD in itself as a page table, to form
|
||||
// a virtual page table at virtual address UVPT.
|
||||
// (For now, you don't have understand the greater purpose of the
|
||||
// following line.)
|
||||
|
||||
// Permissions: kernel R, user R
|
||||
kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
|
||||
// The kernel uses this array to keep track of physical pages: for
|
||||
// each physical page, there is a corresponding struct PageInfo in this
|
||||
// array. 'npages' is the number of physical pages in memory. Use memset
|
||||
// to initialize all fields of each struct PageInfo to 0.
|
||||
// Your code goes here:
|
||||
size_t pages_size = sizeof(struct PageInfo) * npages;
|
||||
pages = boot_alloc(pages_size);
|
||||
memset(pages, 0, pages_size);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Now that we've allocated the initial kernel data structures, we set
|
||||
// up the list of free physical pages. Once we've done so, all further
|
||||
// memory management will go through the page_* functions. In
|
||||
// particular, we can now map memory using boot_map_region
|
||||
// or page_insert
|
||||
page_init();
|
||||
|
||||
check_page_free_list(1);
|
||||
check_page_alloc();
|
||||
check_page();
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Now we set up virtual memory
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Map 'pages' read-only by the user at linear address UPAGES
|
||||
// Permissions:
|
||||
// - the new image at UPAGES -- kernel R, user R
|
||||
// (ie. perm = PTE_U | PTE_P)
|
||||
// - pages itself -- kernel RW, user NONE
|
||||
// Your code goes here:
|
||||
boot_map_region(kern_pgdir,
|
||||
UPAGES, ROUNDUP(pages_size, PGSIZE),
|
||||
PADDR(pages), PTE_U);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Use the physical memory that 'bootstack' refers to as the kernel
|
||||
// stack. The kernel stack grows down from virtual address KSTACKTOP.
|
||||
// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
|
||||
// to be the kernel stack, but break this into two pieces:
|
||||
// * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
|
||||
// * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
|
||||
// the kernel overflows its stack, it will fault rather than
|
||||
// overwrite memory. Known as a "guard page".
|
||||
// Permissions: kernel RW, user NONE
|
||||
// Your code goes here:
|
||||
boot_map_region(kern_pgdir,
|
||||
KSTACKTOP-KSTKSIZE, KSTKSIZE,
|
||||
PADDR(bootstack), PTE_W);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Map all of physical memory at KERNBASE.
|
||||
// Ie. the VA range [KERNBASE, 2^32) should map to
|
||||
// the PA range [0, 2^32 - KERNBASE)
|
||||
// We might not have 2^32 - KERNBASE bytes of physical memory, but
|
||||
// we just set up the mapping anyway.
|
||||
// Permissions: kernel RW, user NONE
|
||||
// Your code goes here:
|
||||
boot_map_region(kern_pgdir,
|
||||
KERNBASE, 0x100000000 - KERNBASE,
|
||||
0, PTE_W);
|
||||
|
||||
// Check that the initial page directory has been set up correctly.
|
||||
check_kern_pgdir();
|
||||
|
||||
// Switch from the minimal entry page directory to the full kern_pgdir
|
||||
// page table we just created. Our instruction pointer should be
|
||||
// somewhere between KERNBASE and KERNBASE+4MB right now, which is
|
||||
// mapped the same way by both page tables.
|
||||
//
|
||||
// If the machine reboots at this point, you've probably set up your
|
||||
// kern_pgdir wrong.
|
||||
lcr3(PADDR(kern_pgdir));
|
||||
|
||||
check_page_free_list(0);
|
||||
|
||||
// entry.S set the really important flags in cr0 (including enabling
|
||||
// paging). Here we configure the rest of the flags that we care about.
|
||||
cr0 = rcr0();
|
||||
cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
|
||||
cr0 &= ~(CR0_TS|CR0_EM);
|
||||
lcr0(cr0);
|
||||
|
||||
// Some more checks, only possible after kern_pgdir is installed.
|
||||
check_page_installed_pgdir();
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------
|
||||
// Tracking of physical pages.
|
||||
// The 'pages' array has one 'struct PageInfo' entry per physical page.
|
||||
// Pages are reference counted, and free pages are kept on a linked list.
|
||||
// --------------------------------------------------------------
|
||||
bool
|
||||
is_reserved(size_t pagenum) {
|
||||
if(pagenum == 0) return true;
|
||||
if(pagenum >= PGNUM(IOPHYSMEM) &&
|
||||
pagenum < PGNUM(PADDR(boot_alloc(0)))) return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
//
|
||||
// Initialize page structure and memory free list.
|
||||
// After this is done, NEVER use boot_alloc again. ONLY use the page
|
||||
// allocator functions below to allocate and deallocate physical
|
||||
// memory via the page_free_list.
|
||||
//
|
||||
void
|
||||
page_init(void)
|
||||
{
|
||||
// The example code here marks all physical pages as free.
|
||||
// However this is not truly the case. What memory is free?
|
||||
// 1) Mark physical page 0 as in use.
|
||||
// This way we preserve the real-mode IDT and BIOS structures
|
||||
// in case we ever need them. (Currently we don't, but...)
|
||||
// 2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
|
||||
// is free.
|
||||
// 3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
|
||||
// never be allocated.
|
||||
// 4) Then extended memory [EXTPHYSMEM, ...).
|
||||
// Some of it is in use, some is free. Where is the kernel
|
||||
// in physical memory? Which pages are already in use for
|
||||
// page tables and other data structures?
|
||||
//
|
||||
// Change the code to reflect this.
|
||||
// NB: DO NOT actually touch the physical memory corresponding to
|
||||
// free pages!
|
||||
size_t i;
|
||||
for (i = 0; i < npages; i++) {
|
||||
if(is_reserved(i)) {
|
||||
pages[i].pp_ref = 1;
|
||||
} else {
|
||||
pages[i].pp_ref = 0;
|
||||
pages[i].pp_link = page_free_list;
|
||||
page_free_list = &pages[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Allocates a physical page. If (alloc_flags & ALLOC_ZERO), fills the entire
|
||||
// returned physical page with '\0' bytes. Does NOT increment the reference
|
||||
// count of the page - the caller must do these if necessary (either explicitly
|
||||
// or via page_insert).
|
||||
//
|
||||
// Be sure to set the pp_link field of the allocated page to NULL so
|
||||
// page_free can check for double-free bugs.
|
||||
//
|
||||
// Returns NULL if out of free memory.
|
||||
//
|
||||
// Hint: use page2kva and memset
|
||||
struct PageInfo *
|
||||
page_alloc(int alloc_flags)
|
||||
{
|
||||
struct PageInfo* to_return = page_free_list;
|
||||
if(to_return == 0) return NULL;
|
||||
|
||||
page_free_list = to_return->pp_link;
|
||||
to_return->pp_link = NULL;
|
||||
|
||||
if(alloc_flags & ALLOC_ZERO) {
|
||||
memset(page2kva(to_return), 0, PGSIZE);
|
||||
}
|
||||
|
||||
return to_return;
|
||||
}
|
||||
|
||||
//
|
||||
// Return a page to the free list.
|
||||
// (This function should only be called when pp->pp_ref reaches 0.)
|
||||
//
|
||||
void
|
||||
page_free(struct PageInfo *pp)
|
||||
{
|
||||
if(pp->pp_ref || pp->pp_link != NULL)
|
||||
panic("Freeing page with nonzero reference count!");
|
||||
pp->pp_link = page_free_list;
|
||||
page_free_list = pp;
|
||||
}
|
||||
|
||||
//
|
||||
// Decrement the reference count on a page,
|
||||
// freeing it if there are no more refs.
|
||||
//
|
||||
void
|
||||
page_decref(struct PageInfo* pp)
|
||||
{
|
||||
if (--pp->pp_ref == 0)
|
||||
page_free(pp);
|
||||
}
|
||||
|
||||
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
|
||||
// a pointer to the page table entry (PTE) for linear address 'va'.
|
||||
// This requires walking the two-level page table structure.
|
||||
//
|
||||
// The relevant page table page might not exist yet.
|
||||
// If this is true, and create == false, then pgdir_walk returns NULL.
|
||||
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
|
||||
// - If the allocation fails, pgdir_walk returns NULL.
|
||||
// - Otherwise, the new page's reference count is incremented,
|
||||
// the page is cleared,
|
||||
// and pgdir_walk returns a pointer into the new page table page.
|
||||
//
|
||||
// Hint 1: you can turn a PageInfo * into the physical address of the
|
||||
// page it refers to with page2pa() from kern/pmap.h.
|
||||
//
|
||||
// Hint 2: the x86 MMU checks permission bits in both the page directory
|
||||
// and the page table, so it's safe to leave permissions in the page
|
||||
// directory more permissive than strictly necessary.
|
||||
//
|
||||
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
|
||||
// table and page directory entries.
|
||||
//
|
||||
pte_t *
|
||||
pgdir_walk(pde_t *pgdir, const void *va, int create)
|
||||
{
|
||||
pte_t* base_table = NULL;
|
||||
|
||||
if(pgdir[PDX(va)] & PTE_P) {
|
||||
// We have a valid page table; awesome!
|
||||
base_table = KADDR(PTE_ADDR(pgdir[PDX(va)]));
|
||||
} else {
|
||||
if(!create) return NULL;
|
||||
|
||||
struct PageInfo* page = page_alloc(ALLOC_ZERO);
|
||||
if(!page) return NULL;
|
||||
|
||||
page->pp_ref++;
|
||||
physaddr_t ppa = page2pa(page);
|
||||
pgdir[PDX(va)] = ppa | PTE_P | PTE_U | PTE_W;
|
||||
base_table = KADDR(ppa);
|
||||
}
|
||||
|
||||
// Fill this function in
|
||||
return &base_table[PTX(va)];
|
||||
}
|
||||
|
||||
//
|
||||
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
|
||||
// in the page table rooted at pgdir. Size is a multiple of PGSIZE, and
|
||||
// va and pa are both page-aligned.
|
||||
// Use permission bits perm|PTE_P for the entries.
|
||||
//
|
||||
// This function is only intended to set up the ``static'' mappings
|
||||
// above UTOP. As such, it should *not* change the pp_ref field on the
|
||||
// mapped pages.
|
||||
//
|
||||
// Hint: the TA solution uses pgdir_walk
|
||||
static void
|
||||
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
|
||||
{
|
||||
size_t count = size / PGSIZE;
|
||||
uintptr_t start_va = va;
|
||||
physaddr_t start_pa = pa;
|
||||
while(count-- && start_va <= va && start_pa <= pa) {
|
||||
pte_t* pte = pgdir_walk(pgdir, (void*) va, true);
|
||||
*pte = pa | perm | PTE_P;
|
||||
|
||||
va += PGSIZE;
|
||||
pa += PGSIZE;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Map the physical page 'pp' at virtual address 'va'.
|
||||
// The permissions (the low 12 bits) of the page table entry
|
||||
// should be set to 'perm|PTE_P'.
|
||||
//
|
||||
// Requirements
|
||||
// - If there is already a page mapped at 'va', it should be page_remove()d.
|
||||
// - If necessary, on demand, a page table should be allocated and inserted
|
||||
// into 'pgdir'.
|
||||
// - pp->pp_ref should be incremented if the insertion succeeds.
|
||||
// - The TLB must be invalidated if a page was formerly present at 'va'.
|
||||
//
|
||||
// Corner-case hint: Make sure to consider what happens when the same
|
||||
// pp is re-inserted at the same virtual address in the same pgdir.
|
||||
// However, try not to distinguish this case in your code, as this
|
||||
// frequently leads to subtle bugs; there's an elegant way to handle
|
||||
// everything in one code path.
|
||||
//
|
||||
// RETURNS:
|
||||
// 0 on success
|
||||
// -E_NO_MEM, if page table couldn't be allocated
|
||||
//
|
||||
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
|
||||
// and page2pa.
|
||||
//
|
||||
int
|
||||
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
|
||||
{
|
||||
pte_t* pte;
|
||||
if(!(pte = pgdir_walk(pgdir, va, true))) return -E_NO_MEM;
|
||||
|
||||
pp->pp_ref++;
|
||||
if(*pte & PTE_P) page_remove(pgdir, va);
|
||||
*pte = page2pa(pp) | PTE_P | perm;
|
||||
tlb_invalidate(pgdir, va);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Return the page mapped at virtual address 'va'.
|
||||
// If pte_store is not zero, then we store in it the address
|
||||
// of the pte for this page. This is used by page_remove and
|
||||
// can be used to verify page permissions for syscall arguments,
|
||||
// but should not be used by most callers.
|
||||
//
|
||||
// Return NULL if there is no page mapped at va.
|
||||
//
|
||||
// Hint: the TA solution uses pgdir_walk and pa2page.
|
||||
//
|
||||
struct PageInfo *
|
||||
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
|
||||
{
|
||||
pte_t* pte;
|
||||
if(!(pte = pgdir_walk(pgdir, va, false))) {
|
||||
if(pte_store) *pte_store = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct PageInfo* pp = pa2page(PTE_ADDR(*pte));
|
||||
if(pte_store) *pte_store = pte;
|
||||
return pp;
|
||||
}
|
||||
|
||||
//
|
||||
// Unmaps the physical page at virtual address 'va'.
|
||||
// If there is no physical page at that address, silently does nothing.
|
||||
//
|
||||
// Details:
|
||||
// - The ref count on the physical page should decrement.
|
||||
// - The physical page should be freed if the refcount reaches 0.
|
||||
// - The pg table entry corresponding to 'va' should be set to 0.
|
||||
// (if such a PTE exists)
|
||||
// - The TLB must be invalidated if you remove an entry from
|
||||
// the page table.
|
||||
//
|
||||
// Hint: The TA solution is implemented using page_lookup,
|
||||
// tlb_invalidate, and page_decref.
|
||||
//
|
||||
void
|
||||
page_remove(pde_t *pgdir, void *va)
|
||||
{
|
||||
pte_t* pte;
|
||||
struct PageInfo* pp;
|
||||
|
||||
pp = page_lookup(pgdir, va, &pte);
|
||||
if(!(*pte & PTE_P)) return;
|
||||
|
||||
if(!(--(pp->pp_ref))) page_free(pp);
|
||||
*pte = 0;
|
||||
tlb_invalidate(pgdir, va);
|
||||
}
|
||||
|
||||
//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	// For now, there is only one address space, so always invalidate.
	// 'pgdir' is therefore unused here; later labs (with per-env page
	// directories) are expected to compare it against the active one.
	invlpg(va);
}
|
||||
|
||||
|
||||
// --------------------------------------------------------------
|
||||
// Checking functions.
|
||||
// --------------------------------------------------------------
|
||||
|
||||
//
// Check that the pages on the page_free_list are reasonable.
//
// If 'only_low_memory' is set, only pages in the first 4MB (the region
// mapped by entry_pgdir) are considered usable, and the free list is
// first reordered so those pages come before all others.
//
static void
check_page_free_list(bool only_low_memory)
{
	struct PageInfo *pp;
	unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
	int nfree_basemem = 0, nfree_extmem = 0;
	char *first_free_page;

	if (!page_free_list)
		panic("'page_free_list' is a null pointer!");

	if (only_low_memory) {
		// Move pages with lower addresses first in the free
		// list, since entry_pgdir does not map all pages.
		// This is a stable partition: tp[0]/tp[1] track the tail of
		// the low-memory and high-memory sublists respectively, then
		// the two sublists are stitched back together (low first).
		struct PageInfo *pp1, *pp2;
		struct PageInfo **tp[2] = { &pp1, &pp2 };
		for (pp = page_free_list; pp; pp = pp->pp_link) {
			int pagetype = PDX(page2pa(pp)) >= pdx_limit;
			*tp[pagetype] = pp;
			tp[pagetype] = &pp->pp_link;
		}
		*tp[1] = 0;
		*tp[0] = pp2;
		page_free_list = pp1;
	}

	// if there's a page that shouldn't be on the free list,
	// try to make sure it eventually causes trouble.
	for (pp = page_free_list; pp; pp = pp->pp_link)
		if (PDX(page2pa(pp)) < pdx_limit)
			memset(page2kva(pp), 0x97, 128);

	// Everything below the boot allocator's break is in use by the kernel.
	first_free_page = (char *) boot_alloc(0);
	for (pp = page_free_list; pp; pp = pp->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp >= pages);
		assert(pp < pages + npages);
		assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

		// check a few pages that shouldn't be on the free list:
		// page 0 (real-mode IDT/BIOS), the IO hole, and kernel pages.
		assert(page2pa(pp) != 0);
		assert(page2pa(pp) != IOPHYSMEM);
		assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp) != EXTPHYSMEM);
		assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);

		if (page2pa(pp) < EXTPHYSMEM)
			++nfree_basemem;
		else
			++nfree_extmem;
	}

	// Both base (<640KB) and extended memory must contribute free pages.
	assert(nfree_basemem > 0);
	assert(nfree_extmem > 0);

	cprintf("check_page_free_list() succeeded!\n");
}
|
||||
|
||||
//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	int nfree;
	struct PageInfo *fl;
	char *c;
	int i;

	if (!pages)
		panic("'pages' is a null pointer!");

	// check number of free pages
	for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
		++nfree;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	// all three distinct and within physical memory
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npages*PGSIZE);
	assert(page2pa(pp1) < npages*PGSIZE);
	assert(page2pa(pp2) < npages*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// free and re-allocate?
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(!page_alloc(0));

	// test flags: ALLOC_ZERO must hand back a zero-filled page even
	// though the page was dirtied before being freed
	memset(page2kva(pp0), 1, PGSIZE);
	page_free(pp0);
	assert((pp = page_alloc(ALLOC_ZERO)));
	assert(pp && pp0 == pp);
	c = page2kva(pp);
	for (i = 0; i < PGSIZE; i++)
		assert(c[i] == 0);

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// number of free pages should be the same as before the test
	for (pp = page_free_list; pp; pp = pp->pp_link)
		--nfree;
	assert(nfree == 0);

	cprintf("check_page_alloc() succeeded!\n");
}
|
||||
|
||||
//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by mem_init()).
//
// This function doesn't test every corner case,
// but it is a pretty good sanity check.
//

static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array: UPAGES must map the 'pages' array, page by page
	n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);


	// check phys mem: all of physical memory is mapped at KERNBASE
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack: mapped just below KSTACKTOP, with the rest of
	// the PTSIZE region below it left unmapped as a guard
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
	assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);

	// check PDE permissions: only the expected directory slots are present
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			// everything at or above KERNBASE is kernel RW;
			// everything else must be empty
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");
}
|
||||
|
||||
// This function returns the physical address of the page containing 'va',
|
||||
// defined by the page directory 'pgdir'. The hardware normally performs
|
||||
// this functionality for us! We define our own version to help check
|
||||
// the check_kern_pgdir() function; it shouldn't be used elsewhere.
|
||||
|
||||
static physaddr_t
|
||||
check_va2pa(pde_t *pgdir, uintptr_t va)
|
||||
{
|
||||
pte_t *p;
|
||||
|
||||
pgdir = &pgdir[PDX(va)];
|
||||
if (!(*pgdir & PTE_P))
|
||||
return ~0;
|
||||
p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
|
||||
if (!(p[PTX(va)] & PTE_P))
|
||||
return ~0;
|
||||
return PTE_ADDR(p[PTX(va)]);
|
||||
}
|
||||
|
||||
|
||||
// check page_insert, page_remove, &c
//
// Exercises the page-table manipulation functions against kern_pgdir
// (before it is loaded into %cr3), including refcounting corner cases.
static void
check_page(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	void *va;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	// PTE_U must also be set on the directory entry for user access
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// test re-inserting pp1 at PGSIZE
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back (it was consumed as a page table above)
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared: dirty pp0, free it, and
	// force pgdir_walk to reuse it as a fresh page table
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page() succeeded!\n");
}
|
||||
|
||||
// check page_insert, page_remove, &c, with an installed kern_pgdir
//
// Unlike check_page(), this runs after kern_pgdir has been loaded into
// %cr3, so stores through mapped virtual addresses go through the real MMU.
static void
check_page_installed_pgdir(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;		// unused in this check
	pte_t *ptep, *ptep1;		// unused in this check
	uintptr_t va;			// unused in this check
	int i;				// unused in this check

	// check that we can read and write installed pages
	pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	// free pp0 so page_insert can consume it as the page table for va 0..4MB
	page_free(pp0);
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
	assert(pp1->pp_ref == 1);
	// reading through the new mapping must see pp1's fill pattern
	assert(*(uint32_t *)PGSIZE == 0x01010101U);
	// replacing the mapping must both remap and fix up refcounts
	page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
	assert(*(uint32_t *)PGSIZE == 0x02020202U);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	// a write through the mapping must land in pp2's physical page
	*(uint32_t *)PGSIZE = 0x03030303U;
	assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(pp2->pp_ref == 0);

	// forcibly take pp0 back (it became the page table above)
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// free the pages we took
	page_free(pp0);

	cprintf("check_page_installed_pgdir() succeeded!\n");
}
|
87
kern/pmap.h
Normal file
87
kern/pmap.h
Normal file
|
@ -0,0 +1,87 @@
|
|||
/* See COPYRIGHT for copyright information. */
|
||||
|
||||
#ifndef JOS_KERN_PMAP_H
|
||||
#define JOS_KERN_PMAP_H
|
||||
#ifndef JOS_KERNEL
|
||||
# error "This is a JOS kernel header; user programs should not #include it"
|
||||
#endif
|
||||
|
||||
#include <inc/memlayout.h>
|
||||
#include <inc/assert.h>
|
||||
|
||||
extern char bootstacktop[], bootstack[];
|
||||
|
||||
extern struct PageInfo *pages;
|
||||
extern size_t npages;
|
||||
|
||||
extern pde_t *kern_pgdir;
|
||||
|
||||
|
||||
/* This macro takes a kernel virtual address -- an address that points above
|
||||
* KERNBASE, where the machine's maximum 256MB of physical memory is mapped --
|
||||
* and returns the corresponding physical address. It panics if you pass it a
|
||||
* non-kernel virtual address.
|
||||
*/
|
||||
#define PADDR(kva) _paddr(__FILE__, __LINE__, kva)
|
||||
|
||||
static inline physaddr_t
|
||||
_paddr(const char *file, int line, void *kva)
|
||||
{
|
||||
if ((uint32_t)kva < KERNBASE)
|
||||
_panic(file, line, "PADDR called with invalid kva %08lx", kva);
|
||||
return (physaddr_t)kva - KERNBASE;
|
||||
}
|
||||
|
||||
/* This macro takes a physical address and returns the corresponding kernel
|
||||
* virtual address. It panics if you pass an invalid physical address. */
|
||||
#define KADDR(pa) _kaddr(__FILE__, __LINE__, pa)
|
||||
|
||||
static inline void*
|
||||
_kaddr(const char *file, int line, physaddr_t pa)
|
||||
{
|
||||
if (PGNUM(pa) >= npages)
|
||||
_panic(file, line, "KADDR called with invalid pa %08lx", pa);
|
||||
return (void *)(pa + KERNBASE);
|
||||
}
|
||||
|
||||
|
||||
enum {
|
||||
// For page_alloc, zero the returned physical page.
|
||||
ALLOC_ZERO = 1<<0,
|
||||
};
|
||||
|
||||
void mem_init(void);
|
||||
|
||||
void page_init(void);
|
||||
struct PageInfo *page_alloc(int alloc_flags);
|
||||
void page_free(struct PageInfo *pp);
|
||||
int page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm);
|
||||
void page_remove(pde_t *pgdir, void *va);
|
||||
struct PageInfo *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store);
|
||||
void page_decref(struct PageInfo *pp);
|
||||
|
||||
void tlb_invalidate(pde_t *pgdir, void *va);
|
||||
|
||||
static inline physaddr_t
|
||||
page2pa(struct PageInfo *pp)
|
||||
{
|
||||
return (pp - pages) << PGSHIFT;
|
||||
}
|
||||
|
||||
static inline struct PageInfo*
|
||||
pa2page(physaddr_t pa)
|
||||
{
|
||||
if (PGNUM(pa) >= npages)
|
||||
panic("pa2page called with invalid pa");
|
||||
return &pages[PGNUM(pa)];
|
||||
}
|
||||
|
||||
/* Return the kernel virtual address at which the page described by 'pp'
 * is mapped (its physical address offset up by KERNBASE, with KADDR's
 * range check). */
static inline void*
page2kva(struct PageInfo *pp)
{
	return KADDR(page2pa(pp));
}
|
||||
|
||||
pte_t *pgdir_walk(pde_t *pgdir, const void *va, int create);
|
||||
|
||||
#endif /* !JOS_KERN_PMAP_H */
|
Loading…
Reference in New Issue
Block a user