diff --git a/GNUmakefile b/GNUmakefile index 7bd9e14..115c1bf 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -67,6 +67,7 @@ endif GDBPORT := $(shell expr `id -u` % 5000 + 25000) CC := $(GCCPREFIX)gcc -pipe +GDB := $(GCCPREFIX)gdb AS := $(GCCPREFIX)as AR := $(GCCPREFIX)ar LD := $(GCCPREFIX)ld @@ -148,7 +149,7 @@ QEMUOPTS += $(QEMUEXTRA) sed "s/localhost:1234/localhost:$(GDBPORT)/" < $^ > $@ gdb: - gdb -n -x .gdbinit + $(GDB) -n -x .gdbinit pre-qemu: .gdbinit diff --git a/conf/lab.mk b/conf/lab.mk index af4714d..c175f9c 100644 --- a/conf/lab.mk +++ b/conf/lab.mk @@ -1,2 +1,2 @@ -LAB=1 -PACKAGEDATE=Thu Aug 30 15:16:04 EDT 2018 +LAB=2 +PACKAGEDATE=Wed Sep 12 14:51:29 EDT 2018 diff --git a/grade-lab2 b/grade-lab2 new file mode 100755 index 0000000..77b0d2d --- /dev/null +++ b/grade-lab2 @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +from gradelib import * + +r = Runner(save("jos.out"), + stop_breakpoint("readline")) + +@test(0, "running JOS") +def test_jos(): + r.run_qemu() + +@test(20, "Physical page allocator", parent=test_jos) +def test_check_page_alloc(): + r.match(r"check_page_alloc\(\) succeeded!") + +@test(20, "Page management", parent=test_jos) +def test_check_page(): + r.match(r"check_page\(\) succeeded!") + +@test(20, "Kernel page directory", parent=test_jos) +def test_check_kern_pgdir(): + r.match(r"check_kern_pgdir\(\) succeeded!") + +@test(10, "Page management 2", parent=test_jos) +def test_check_page_installed_pgdir(): + r.match(r"check_page_installed_pgdir\(\) succeeded!") + +run_tests() diff --git a/inc/memlayout.h b/inc/memlayout.h index 88b5730..a537b15 100644 --- a/inc/memlayout.h +++ b/inc/memlayout.h @@ -143,5 +143,46 @@ typedef uint32_t pte_t; typedef uint32_t pde_t; +#if JOS_USER +/* + * The page directory entry corresponding to the virtual address range + * [UVPT, UVPT + PTSIZE) points to the page directory itself. Thus, the page + * directory is treated as a page table as well as a page directory. 
+ * + * One result of treating the page directory as a page table is that all PTEs + * can be accessed through a "virtual page table" at virtual address UVPT (to + * which uvpt is set in lib/entry.S). The PTE for page number N is stored in + * uvpt[N]. (It's worth drawing a diagram of this!) + * + * A second consequence is that the contents of the current page directory + * will always be available at virtual address (UVPT + (UVPT >> PGSHIFT)), to + * which uvpd is set in lib/entry.S. + */ +extern volatile pte_t uvpt[]; // VA of "virtual page table" +extern volatile pde_t uvpd[]; // VA of current page directory +#endif + +/* + * Page descriptor structures, mapped at UPAGES. + * Read/write to the kernel, read-only to user programs. + * + * Each struct PageInfo stores metadata for one physical page. + * It is NOT the physical page itself, but there is a one-to-one + * correspondence between physical pages and struct PageInfo's. + * You can map a struct PageInfo * to the corresponding physical address + * with page2pa() in kern/pmap.h. + */ +struct PageInfo { + // Next page on the free list. + struct PageInfo *pp_link; + + // pp_ref is the count of pointers (usually in page table entries) + // to this page, for pages allocated using page_alloc. + // Pages allocated at boot time using pmap.c's + // boot_alloc do not have valid reference count fields. 
+ + uint16_t pp_ref; +}; + #endif /* !__ASSEMBLER__ */ #endif /* !JOS_INC_MEMLAYOUT_H */ diff --git a/kern/init.c b/kern/init.c index 30cb91d..1fb9152 100644 --- a/kern/init.c +++ b/kern/init.c @@ -6,18 +6,9 @@ #include #include +#include +#include -// Test the stack backtrace function (lab 1 only) -void -test_backtrace(int x) -{ - cprintf("entering test_backtrace %d\n", x); - if (x > 0) - test_backtrace(x-1); - else - mon_backtrace(0, 0, 0); - cprintf("leaving test_backtrace %d\n", x); -} void i386_init(void) @@ -35,8 +26,8 @@ i386_init(void) cprintf("6828 decimal is %o octal!\n", 6828); - // Test the stack backtrace function (lab 1 only) - test_backtrace(5); + // Lab 2 memory management initialization functions + mem_init(); // Drop into the kernel monitor. while (1) diff --git a/kern/kclock.c b/kern/kclock.c new file mode 100644 index 0000000..08a87f2 --- /dev/null +++ b/kern/kclock.c @@ -0,0 +1,22 @@ +/* See COPYRIGHT for copyright information. */ + +/* Support for reading the NVRAM from the real-time clock. */ + +#include + +#include + + +unsigned +mc146818_read(unsigned reg) +{ + outb(IO_RTC, reg); + return inb(IO_RTC+1); +} + +void +mc146818_write(unsigned reg, unsigned datum) +{ + outb(IO_RTC, reg); + outb(IO_RTC+1, datum); +} diff --git a/kern/kclock.h b/kern/kclock.h new file mode 100644 index 0000000..e409a81 --- /dev/null +++ b/kern/kclock.h @@ -0,0 +1,29 @@ +/* See COPYRIGHT for copyright information. */ + +#ifndef JOS_KERN_KCLOCK_H +#define JOS_KERN_KCLOCK_H +#ifndef JOS_KERNEL +# error "This is a JOS kernel header; user programs should not #include it" +#endif + +#define IO_RTC 0x070 /* RTC port */ + +#define MC_NVRAM_START 0xe /* start of NVRAM: offset 14 */ +#define MC_NVRAM_SIZE 50 /* 50 bytes of NVRAM */ + +/* NVRAM bytes 7 & 8: base memory size */ +#define NVRAM_BASELO (MC_NVRAM_START + 7) /* low byte; RTC off. 0x15 */ +#define NVRAM_BASEHI (MC_NVRAM_START + 8) /* high byte; RTC off. 
0x16 */ + +/* NVRAM bytes 9 & 10: extended memory size (between 1MB and 16MB) */ +#define NVRAM_EXTLO (MC_NVRAM_START + 9) /* low byte; RTC off. 0x17 */ +#define NVRAM_EXTHI (MC_NVRAM_START + 10) /* high byte; RTC off. 0x18 */ + +/* NVRAM bytes 38 and 39: extended memory size (between 16MB and 4G) */ +#define NVRAM_EXT16LO (MC_NVRAM_START + 38) /* low byte; RTC off. 0x34 */ +#define NVRAM_EXT16HI (MC_NVRAM_START + 39) /* high byte; RTC off. 0x35 */ + +unsigned mc146818_read(unsigned reg); +void mc146818_write(unsigned reg, unsigned datum); + +#endif // !JOS_KERN_KCLOCK_H diff --git a/kern/pmap.c b/kern/pmap.c new file mode 100644 index 0000000..8c809f1 --- /dev/null +++ b/kern/pmap.c @@ -0,0 +1,841 @@ +/* See COPYRIGHT for copyright information. */ + +#include +#include +#include +#include +#include + +#include +#include + +// These variables are set by i386_detect_memory() +size_t npages; // Amount of physical memory (in pages) +static size_t npages_basemem; // Amount of base memory (in pages) + +// These variables are set in mem_init() +pde_t *kern_pgdir; // Kernel's initial page directory +struct PageInfo *pages; // Physical page state array +static struct PageInfo *page_free_list; // Free list of physical pages + + +// -------------------------------------------------------------- +// Detect machine's physical memory setup. +// -------------------------------------------------------------- + +static int +nvram_read(int r) +{ + return mc146818_read(r) | (mc146818_read(r + 1) << 8); +} + +static void +i386_detect_memory(void) +{ + size_t basemem, extmem, ext16mem, totalmem; + + // Use CMOS calls to measure available base & extended memory. + // (CMOS calls return results in kilobytes.) + basemem = nvram_read(NVRAM_BASELO); + extmem = nvram_read(NVRAM_EXTLO); + ext16mem = nvram_read(NVRAM_EXT16LO) * 64; + + // Calculate the number of physical pages available in both base + // and extended memory. 
+ if (ext16mem) + totalmem = 16 * 1024 + ext16mem; + else if (extmem) + totalmem = 1 * 1024 + extmem; + else + totalmem = basemem; + + npages = totalmem / (PGSIZE / 1024); + npages_basemem = basemem / (PGSIZE / 1024); + + cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n", + totalmem, basemem, totalmem - basemem); +} + + +// -------------------------------------------------------------- +// Set up memory mappings above UTOP. +// -------------------------------------------------------------- + +static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm); +static void check_page_free_list(bool only_low_memory); +static void check_page_alloc(void); +static void check_kern_pgdir(void); +static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va); +static void check_page(void); +static void check_page_installed_pgdir(void); + +// This simple physical memory allocator is used only while JOS is setting +// up its virtual memory system. page_alloc() is the real allocator. +// +// If n>0, allocates enough pages of contiguous physical memory to hold 'n' +// bytes. Doesn't initialize the memory. Returns a kernel virtual address. +// +// If n==0, returns the address of the next free page without allocating +// anything. +// +// If we're out of memory, boot_alloc should panic. +// This function may ONLY be used during initialization, +// before the page_free_list list has been set up. +static void * +boot_alloc(uint32_t n) +{ + static char *nextfree; // virtual address of next byte of free memory + char *result; + + // Initialize nextfree if this is the first time. + // 'end' is a magic symbol automatically generated by the linker, + // which points to the end of the kernel's bss segment: + // the first virtual address that the linker did *not* assign + // to any kernel code or global variables. 
if (!nextfree) { + extern char end[]; + nextfree = ROUNDUP((char *) end, PGSIZE); + } + + // Allocate a chunk large enough to hold 'n' bytes, then update + // nextfree. Make sure nextfree is kept aligned + // to a multiple of PGSIZE. + // + // LAB 2: Your code here. + + return NULL; +} + +// Set up a two-level page table: +// kern_pgdir is the linear (virtual) address of the root +// +// This function only sets up the kernel part of the address space +// (ie. addresses >= UTOP). The user part of the address space +// will be set up later. +// +// From UTOP to ULIM, the user is allowed to read but not write. +// Above ULIM the user cannot read or write. +void +mem_init(void) +{ + uint32_t cr0; + size_t n; + + // Find out how much memory the machine has (npages & npages_basemem). + i386_detect_memory(); + + // Remove this line when you're ready to test this function. + panic("mem_init: This function is not finished\n"); + + ////////////////////////////////////////////////////////////////////// + // create initial page directory. + kern_pgdir = (pde_t *) boot_alloc(PGSIZE); + memset(kern_pgdir, 0, PGSIZE); + + ////////////////////////////////////////////////////////////////////// + // Recursively insert PD in itself as a page table, to form + // a virtual page table at virtual address UVPT. + // (For now, you don't have to understand the greater purpose of the + // following line.) + + // Permissions: kernel R, user R + kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P; + + ////////////////////////////////////////////////////////////////////// + // Allocate an array of npages 'struct PageInfo's and store it in 'pages'. + // The kernel uses this array to keep track of physical pages: for + // each physical page, there is a corresponding struct PageInfo in this + // array. 'npages' is the number of physical pages in memory. Use memset + // to initialize all fields of each struct PageInfo to 0. 
+ // Your code goes here: + + + ////////////////////////////////////////////////////////////////////// + // Now that we've allocated the initial kernel data structures, we set + // up the list of free physical pages. Once we've done so, all further + // memory management will go through the page_* functions. In + // particular, we can now map memory using boot_map_region + // or page_insert + page_init(); + + check_page_free_list(1); + check_page_alloc(); + check_page(); + + ////////////////////////////////////////////////////////////////////// + // Now we set up virtual memory + + ////////////////////////////////////////////////////////////////////// + // Map 'pages' read-only by the user at linear address UPAGES + // Permissions: + // - the new image at UPAGES -- kernel R, user R + // (ie. perm = PTE_U | PTE_P) + // - pages itself -- kernel RW, user NONE + // Your code goes here: + + ////////////////////////////////////////////////////////////////////// + // Use the physical memory that 'bootstack' refers to as the kernel + // stack. The kernel stack grows down from virtual address KSTACKTOP. + // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP) + // to be the kernel stack, but break this into two pieces: + // * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory + // * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if + // the kernel overflows its stack, it will fault rather than + // overwrite memory. Known as a "guard page". + // Permissions: kernel RW, user NONE + // Your code goes here: + + ////////////////////////////////////////////////////////////////////// + // Map all of physical memory at KERNBASE. + // Ie. the VA range [KERNBASE, 2^32) should map to + // the PA range [0, 2^32 - KERNBASE) + // We might not have 2^32 - KERNBASE bytes of physical memory, but + // we just set up the mapping anyway. 
+ // Permissions: kernel RW, user NONE + // Your code goes here: + + // Check that the initial page directory has been set up correctly. + check_kern_pgdir(); + + // Switch from the minimal entry page directory to the full kern_pgdir + // page table we just created. Our instruction pointer should be + // somewhere between KERNBASE and KERNBASE+4MB right now, which is + // mapped the same way by both page tables. + // + // If the machine reboots at this point, you've probably set up your + // kern_pgdir wrong. + lcr3(PADDR(kern_pgdir)); + + check_page_free_list(0); + + // entry.S set the really important flags in cr0 (including enabling + // paging). Here we configure the rest of the flags that we care about. + cr0 = rcr0(); + cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP; + cr0 &= ~(CR0_TS|CR0_EM); + lcr0(cr0); + + // Some more checks, only possible after kern_pgdir is installed. + check_page_installed_pgdir(); +} + +// -------------------------------------------------------------- +// Tracking of physical pages. +// The 'pages' array has one 'struct PageInfo' entry per physical page. +// Pages are reference counted, and free pages are kept on a linked list. +// -------------------------------------------------------------- + +// +// Initialize page structure and memory free list. +// After this is done, NEVER use boot_alloc again. ONLY use the page +// allocator functions below to allocate and deallocate physical +// memory via the page_free_list. +// +void +page_init(void) +{ + // The example code here marks all physical pages as free. + // However this is not truly the case. What memory is free? + // 1) Mark physical page 0 as in use. + // This way we preserve the real-mode IDT and BIOS structures + // in case we ever need them. (Currently we don't, but...) + // 2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE) + // is free. + // 3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must + // never be allocated. 
+ // 4) Then extended memory [EXTPHYSMEM, ...). + // Some of it is in use, some is free. Where is the kernel + // in physical memory? Which pages are already in use for + // page tables and other data structures? + // + // Change the code to reflect this. + // NB: DO NOT actually touch the physical memory corresponding to + // free pages! + size_t i; + for (i = 0; i < npages; i++) { + pages[i].pp_ref = 0; + pages[i].pp_link = page_free_list; + page_free_list = &pages[i]; + } +} + +// +// Allocates a physical page. If (alloc_flags & ALLOC_ZERO), fills the entire +// returned physical page with '\0' bytes. Does NOT increment the reference +// count of the page - the caller must do these if necessary (either explicitly +// or via page_insert). +// +// Be sure to set the pp_link field of the allocated page to NULL so +// page_free can check for double-free bugs. +// +// Returns NULL if out of free memory. +// +// Hint: use page2kva and memset +struct PageInfo * +page_alloc(int alloc_flags) +{ + // Fill this function in + return 0; +} + +// +// Return a page to the free list. +// (This function should only be called when pp->pp_ref reaches 0.) +// +void +page_free(struct PageInfo *pp) +{ + // Fill this function in + // Hint: You may want to panic if pp->pp_ref is nonzero or + // pp->pp_link is not NULL. +} + +// +// Decrement the reference count on a page, +// freeing it if there are no more refs. +// +void +page_decref(struct PageInfo* pp) +{ + if (--pp->pp_ref == 0) + page_free(pp); +} + +// Given 'pgdir', a pointer to a page directory, pgdir_walk returns +// a pointer to the page table entry (PTE) for linear address 'va'. +// This requires walking the two-level page table structure. +// +// The relevant page table page might not exist yet. +// If this is true, and create == false, then pgdir_walk returns NULL. +// Otherwise, pgdir_walk allocates a new page table page with page_alloc. +// - If the allocation fails, pgdir_walk returns NULL. 
+// - Otherwise, the new page's reference count is incremented, +// the page is cleared, +// and pgdir_walk returns a pointer into the new page table page. +// +// Hint 1: you can turn a PageInfo * into the physical address of the +// page it refers to with page2pa() from kern/pmap.h. +// +// Hint 2: the x86 MMU checks permission bits in both the page directory +// and the page table, so it's safe to leave permissions in the page +// directory more permissive than strictly necessary. +// +// Hint 3: look at inc/mmu.h for useful macros that manipulate page +// table and page directory entries. +// +pte_t * +pgdir_walk(pde_t *pgdir, const void *va, int create) +{ + // Fill this function in + return NULL; +} + +// +// Map [va, va+size) of virtual address space to physical [pa, pa+size) +// in the page table rooted at pgdir. Size is a multiple of PGSIZE, and +// va and pa are both page-aligned. +// Use permission bits perm|PTE_P for the entries. +// +// This function is only intended to set up the ``static'' mappings +// above UTOP. As such, it should *not* change the pp_ref field on the +// mapped pages. +// +// Hint: the TA solution uses pgdir_walk +static void +boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm) +{ + // Fill this function in +} + +// +// Map the physical page 'pp' at virtual address 'va'. +// The permissions (the low 12 bits) of the page table entry +// should be set to 'perm|PTE_P'. +// +// Requirements +// - If there is already a page mapped at 'va', it should be page_remove()d. +// - If necessary, on demand, a page table should be allocated and inserted +// into 'pgdir'. +// - pp->pp_ref should be incremented if the insertion succeeds. +// - The TLB must be invalidated if a page was formerly present at 'va'. +// +// Corner-case hint: Make sure to consider what happens when the same +// pp is re-inserted at the same virtual address in the same pgdir. 
+// However, try not to distinguish this case in your code, as this +// frequently leads to subtle bugs; there's an elegant way to handle +// everything in one code path. +// +// RETURNS: +// 0 on success +// -E_NO_MEM, if page table couldn't be allocated +// +// Hint: The TA solution is implemented using pgdir_walk, page_remove, +// and page2pa. +// +int +page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm) +{ + // Fill this function in + return 0; +} + +// +// Return the page mapped at virtual address 'va'. +// If pte_store is not zero, then we store in it the address +// of the pte for this page. This is used by page_remove and +// can be used to verify page permissions for syscall arguments, +// but should not be used by most callers. +// +// Return NULL if there is no page mapped at va. +// +// Hint: the TA solution uses pgdir_walk and pa2page. +// +struct PageInfo * +page_lookup(pde_t *pgdir, void *va, pte_t **pte_store) +{ + // Fill this function in + return NULL; +} + +// +// Unmaps the physical page at virtual address 'va'. +// If there is no physical page at that address, silently does nothing. +// +// Details: +// - The ref count on the physical page should decrement. +// - The physical page should be freed if the refcount reaches 0. +// - The pg table entry corresponding to 'va' should be set to 0. +// (if such a PTE exists) +// - The TLB must be invalidated if you remove an entry from +// the page table. +// +// Hint: The TA solution is implemented using page_lookup, +// tlb_invalidate, and page_decref. +// +void +page_remove(pde_t *pgdir, void *va) +{ + // Fill this function in +} + +// +// Invalidate a TLB entry, but only if the page tables being +// edited are the ones currently in use by the processor. +// +void +tlb_invalidate(pde_t *pgdir, void *va) +{ + // Flush the entry only if we're modifying the current address space. + // For now, there is only one address space, so always invalidate. 
+ invlpg(va); +} + + +// -------------------------------------------------------------- +// Checking functions. +// -------------------------------------------------------------- + +// +// Check that the pages on the page_free_list are reasonable. +// +static void +check_page_free_list(bool only_low_memory) +{ + struct PageInfo *pp; + unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES; + int nfree_basemem = 0, nfree_extmem = 0; + char *first_free_page; + + if (!page_free_list) + panic("'page_free_list' is a null pointer!"); + + if (only_low_memory) { + // Move pages with lower addresses first in the free + // list, since entry_pgdir does not map all pages. + struct PageInfo *pp1, *pp2; + struct PageInfo **tp[2] = { &pp1, &pp2 }; + for (pp = page_free_list; pp; pp = pp->pp_link) { + int pagetype = PDX(page2pa(pp)) >= pdx_limit; + *tp[pagetype] = pp; + tp[pagetype] = &pp->pp_link; + } + *tp[1] = 0; + *tp[0] = pp2; + page_free_list = pp1; + } + + // if there's a page that shouldn't be on the free list, + // try to make sure it eventually causes trouble. 
+ for (pp = page_free_list; pp; pp = pp->pp_link) + if (PDX(page2pa(pp)) < pdx_limit) + memset(page2kva(pp), 0x97, 128); + + first_free_page = (char *) boot_alloc(0); + for (pp = page_free_list; pp; pp = pp->pp_link) { + // check that we didn't corrupt the free list itself + assert(pp >= pages); + assert(pp < pages + npages); + assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0); + + // check a few pages that shouldn't be on the free list + assert(page2pa(pp) != 0); + assert(page2pa(pp) != IOPHYSMEM); + assert(page2pa(pp) != EXTPHYSMEM - PGSIZE); + assert(page2pa(pp) != EXTPHYSMEM); + assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page); + + if (page2pa(pp) < EXTPHYSMEM) + ++nfree_basemem; + else + ++nfree_extmem; + } + + assert(nfree_basemem > 0); + assert(nfree_extmem > 0); + + cprintf("check_page_free_list() succeeded!\n"); +} + +// +// Check the physical page allocator (page_alloc(), page_free(), +// and page_init()). +// +static void +check_page_alloc(void) +{ + struct PageInfo *pp, *pp0, *pp1, *pp2; + int nfree; + struct PageInfo *fl; + char *c; + int i; + + if (!pages) + panic("'pages' is a null pointer!"); + + // check number of free pages + for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link) + ++nfree; + + // should be able to allocate three pages + pp0 = pp1 = pp2 = 0; + assert((pp0 = page_alloc(0))); + assert((pp1 = page_alloc(0))); + assert((pp2 = page_alloc(0))); + + assert(pp0); + assert(pp1 && pp1 != pp0); + assert(pp2 && pp2 != pp1 && pp2 != pp0); + assert(page2pa(pp0) < npages*PGSIZE); + assert(page2pa(pp1) < npages*PGSIZE); + assert(page2pa(pp2) < npages*PGSIZE); + + // temporarily steal the rest of the free pages + fl = page_free_list; + page_free_list = 0; + + // should be no free memory + assert(!page_alloc(0)); + + // free and re-allocate? 
+ page_free(pp0); + page_free(pp1); + page_free(pp2); + pp0 = pp1 = pp2 = 0; + assert((pp0 = page_alloc(0))); + assert((pp1 = page_alloc(0))); + assert((pp2 = page_alloc(0))); + assert(pp0); + assert(pp1 && pp1 != pp0); + assert(pp2 && pp2 != pp1 && pp2 != pp0); + assert(!page_alloc(0)); + + // test flags + memset(page2kva(pp0), 1, PGSIZE); + page_free(pp0); + assert((pp = page_alloc(ALLOC_ZERO))); + assert(pp && pp0 == pp); + c = page2kva(pp); + for (i = 0; i < PGSIZE; i++) + assert(c[i] == 0); + + // give free list back + page_free_list = fl; + + // free the pages we took + page_free(pp0); + page_free(pp1); + page_free(pp2); + + // number of free pages should be the same + for (pp = page_free_list; pp; pp = pp->pp_link) + --nfree; + assert(nfree == 0); + + cprintf("check_page_alloc() succeeded!\n"); +} + +// +// Checks that the kernel part of virtual address space +// has been set up roughly correctly (by mem_init()). +// +// This function doesn't test every corner case, +// but it is a pretty good sanity check. 
+// + +static void +check_kern_pgdir(void) +{ + uint32_t i, n; + pde_t *pgdir; + + pgdir = kern_pgdir; + + // check pages array + n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE); + for (i = 0; i < n; i += PGSIZE) + assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i); + + + // check phys mem + for (i = 0; i < npages * PGSIZE; i += PGSIZE) + assert(check_va2pa(pgdir, KERNBASE + i) == i); + + // check kernel stack + for (i = 0; i < KSTKSIZE; i += PGSIZE) + assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i); + assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0); + + // check PDE permissions + for (i = 0; i < NPDENTRIES; i++) { + switch (i) { + case PDX(UVPT): + case PDX(KSTACKTOP-1): + case PDX(UPAGES): + assert(pgdir[i] & PTE_P); + break; + default: + if (i >= PDX(KERNBASE)) { + assert(pgdir[i] & PTE_P); + assert(pgdir[i] & PTE_W); + } else + assert(pgdir[i] == 0); + break; + } + } + cprintf("check_kern_pgdir() succeeded!\n"); +} + +// This function returns the physical address of the page containing 'va', +// defined by the page directory 'pgdir'. The hardware normally performs +// this functionality for us! We define our own version to help check +// the check_kern_pgdir() function; it shouldn't be used elsewhere. 
+ +static physaddr_t +check_va2pa(pde_t *pgdir, uintptr_t va) +{ + pte_t *p; + + pgdir = &pgdir[PDX(va)]; + if (!(*pgdir & PTE_P)) + return ~0; + p = (pte_t*) KADDR(PTE_ADDR(*pgdir)); + if (!(p[PTX(va)] & PTE_P)) + return ~0; + return PTE_ADDR(p[PTX(va)]); +} + + +// check page_insert, page_remove, &c +static void +check_page(void) +{ + struct PageInfo *pp, *pp0, *pp1, *pp2; + struct PageInfo *fl; + pte_t *ptep, *ptep1; + void *va; + int i; + extern pde_t entry_pgdir[]; + + // should be able to allocate three pages + pp0 = pp1 = pp2 = 0; + assert((pp0 = page_alloc(0))); + assert((pp1 = page_alloc(0))); + assert((pp2 = page_alloc(0))); + + assert(pp0); + assert(pp1 && pp1 != pp0); + assert(pp2 && pp2 != pp1 && pp2 != pp0); + + // temporarily steal the rest of the free pages + fl = page_free_list; + page_free_list = 0; + + // should be no free memory + assert(!page_alloc(0)); + + // there is no page allocated at address 0 + assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL); + + // there is no free memory, so we can't allocate a page table + assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0); + + // free pp0 and try again: pp0 should be used for page table + page_free(pp0); + assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0); + assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0)); + assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1)); + assert(pp1->pp_ref == 1); + assert(pp0->pp_ref == 1); + + // should be able to map pp2 at PGSIZE because pp0 is already allocated for page table + assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0); + assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2)); + assert(pp2->pp_ref == 1); + + // should be no free memory + assert(!page_alloc(0)); + + // should be able to map pp2 at PGSIZE because it's already there + assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0); + assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2)); + assert(pp2->pp_ref == 1); + + // pp2 should NOT be on the free list + // 
could happen if ref counts are handled sloppily in page_insert + assert(!page_alloc(0)); + + // check that pgdir_walk returns a pointer to the pte + ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)])); + assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE)); + + // should be able to change permissions too. + assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0); + assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2)); + assert(pp2->pp_ref == 1); + assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U); + assert(kern_pgdir[0] & PTE_U); + + // should be able to remap with fewer permissions + assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0); + assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W); + assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U)); + + // should not be able to map at PTSIZE because need free page for page table + assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0); + + // insert pp1 at PGSIZE (replacing pp2) + assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0); + assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U)); + + // should have pp1 at both 0 and PGSIZE, pp2 nowhere, ... + assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1)); + assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1)); + // ... 
and ref counts should reflect this + assert(pp1->pp_ref == 2); + assert(pp2->pp_ref == 0); + + // pp2 should be returned by page_alloc + assert((pp = page_alloc(0)) && pp == pp2); + + // unmapping pp1 at 0 should keep pp1 at PGSIZE + page_remove(kern_pgdir, 0x0); + assert(check_va2pa(kern_pgdir, 0x0) == ~0); + assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1)); + assert(pp1->pp_ref == 1); + assert(pp2->pp_ref == 0); + + // test re-inserting pp1 at PGSIZE + assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0); + assert(pp1->pp_ref); + assert(pp1->pp_link == NULL); + + // unmapping pp1 at PGSIZE should free it + page_remove(kern_pgdir, (void*) PGSIZE); + assert(check_va2pa(kern_pgdir, 0x0) == ~0); + assert(check_va2pa(kern_pgdir, PGSIZE) == ~0); + assert(pp1->pp_ref == 0); + assert(pp2->pp_ref == 0); + + // so it should be returned by page_alloc + assert((pp = page_alloc(0)) && pp == pp1); + + // should be no free memory + assert(!page_alloc(0)); + + // forcibly take pp0 back + assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0)); + kern_pgdir[0] = 0; + assert(pp0->pp_ref == 1); + pp0->pp_ref = 0; + + // check pointer arithmetic in pgdir_walk + page_free(pp0); + va = (void*)(PGSIZE * NPDENTRIES + PGSIZE); + ptep = pgdir_walk(kern_pgdir, va, 1); + ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)])); + assert(ptep == ptep1 + PTX(va)); + kern_pgdir[PDX(va)] = 0; + pp0->pp_ref = 0; + + // check that new page tables get cleared + memset(page2kva(pp0), 0xFF, PGSIZE); + page_free(pp0); + pgdir_walk(kern_pgdir, 0x0, 1); + ptep = (pte_t *) page2kva(pp0); + for(i=0; i<NPTENTRIES; i++) + assert((ptep[i] & PTE_P) == 0); + kern_pgdir[0] = 0; + pp0->pp_ref = 0; + + // give free list back + page_free_list = fl; + + // free the pages we took + page_free(pp0); + page_free(pp1); + page_free(pp2); + + cprintf("check_page() succeeded!\n"); +} + +// check page_insert, page_remove, &c, with an installed kern_pgdir +static void +check_page_installed_pgdir(void) +{ + struct PageInfo *pp, *pp0, *pp1, *pp2; + struct PageInfo *fl; + pte_t *ptep, *ptep1; + 
uintptr_t va; + int i; + + // check that we can read and write installed pages + pp1 = pp2 = 0; + assert((pp0 = page_alloc(0))); + assert((pp1 = page_alloc(0))); + assert((pp2 = page_alloc(0))); + page_free(pp0); + memset(page2kva(pp1), 1, PGSIZE); + memset(page2kva(pp2), 2, PGSIZE); + page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W); + assert(pp1->pp_ref == 1); + assert(*(uint32_t *)PGSIZE == 0x01010101U); + page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W); + assert(*(uint32_t *)PGSIZE == 0x02020202U); + assert(pp2->pp_ref == 1); + assert(pp1->pp_ref == 0); + *(uint32_t *)PGSIZE = 0x03030303U; + assert(*(uint32_t *)page2kva(pp2) == 0x03030303U); + page_remove(kern_pgdir, (void*) PGSIZE); + assert(pp2->pp_ref == 0); + + // forcibly take pp0 back + assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0)); + kern_pgdir[0] = 0; + assert(pp0->pp_ref == 1); + pp0->pp_ref = 0; + + // free the pages we took + page_free(pp0); + + cprintf("check_page_installed_pgdir() succeeded!\n"); +} diff --git a/kern/pmap.h b/kern/pmap.h new file mode 100644 index 0000000..950cca1 --- /dev/null +++ b/kern/pmap.h @@ -0,0 +1,87 @@ +/* See COPYRIGHT for copyright information. */ + +#ifndef JOS_KERN_PMAP_H +#define JOS_KERN_PMAP_H +#ifndef JOS_KERNEL +# error "This is a JOS kernel header; user programs should not #include it" +#endif + +#include +#include + +extern char bootstacktop[], bootstack[]; + +extern struct PageInfo *pages; +extern size_t npages; + +extern pde_t *kern_pgdir; + + +/* This macro takes a kernel virtual address -- an address that points above + * KERNBASE, where the machine's maximum 256MB of physical memory is mapped -- + * and returns the corresponding physical address. It panics if you pass it a + * non-kernel virtual address. 
+ */ +#define PADDR(kva) _paddr(__FILE__, __LINE__, kva) + +static inline physaddr_t +_paddr(const char *file, int line, void *kva) +{ + if ((uint32_t)kva < KERNBASE) + _panic(file, line, "PADDR called with invalid kva %08lx", kva); + return (physaddr_t)kva - KERNBASE; +} + +/* This macro takes a physical address and returns the corresponding kernel + * virtual address. It panics if you pass an invalid physical address. */ +#define KADDR(pa) _kaddr(__FILE__, __LINE__, pa) + +static inline void* +_kaddr(const char *file, int line, physaddr_t pa) +{ + if (PGNUM(pa) >= npages) + _panic(file, line, "KADDR called with invalid pa %08lx", pa); + return (void *)(pa + KERNBASE); +} + + +enum { + // For page_alloc, zero the returned physical page. + ALLOC_ZERO = 1<<0, +}; + +void mem_init(void); + +void page_init(void); +struct PageInfo *page_alloc(int alloc_flags); +void page_free(struct PageInfo *pp); +int page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm); +void page_remove(pde_t *pgdir, void *va); +struct PageInfo *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store); +void page_decref(struct PageInfo *pp); + +void tlb_invalidate(pde_t *pgdir, void *va); + +static inline physaddr_t +page2pa(struct PageInfo *pp) +{ + return (pp - pages) << PGSHIFT; +} + +static inline struct PageInfo* +pa2page(physaddr_t pa) +{ + if (PGNUM(pa) >= npages) + panic("pa2page called with invalid pa"); + return &pages[PGNUM(pa)]; +} + +static inline void* +page2kva(struct PageInfo *pp) +{ + return KADDR(page2pa(pp)); +} + +pte_t *pgdir_walk(pde_t *pgdir, const void *va, int create); + +#endif /* !JOS_KERN_PMAP_H */