/* See COPYRIGHT for copyright information. */

#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>

#include <kern/monitor.h>
#include <kern/pmap.h>
#include <kern/kclock.h>
#include <kern/env.h>
#include <kern/cpu.h>

// These variables are set by i386_detect_memory()
size_t npages;			// Amount of physical memory (in pages)
static size_t npages_basemem;	// Amount of base memory (in pages)

// These variables are set in mem_init()
pde_t *kern_pgdir;		// Kernel's initial page directory
struct PageInfo *pages;		// Physical page state array
static struct PageInfo *page_free_list;	// Free list of physical pages

// --------------------------------------------------------------
// Detect machine's physical memory setup.
// --------------------------------------------------------------

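// Read a 16-bit, little-endian value out of CMOS/NVRAM: register r
// holds the low byte and register r+1 the high byte.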
static int
nvram_read(int r)
{
	return mc146818_read(r) | (mc146818_read(r + 1) << 8);
}

static void
i386_detect_memory(void)
{
	size_t basemem, extmem, ext16mem, totalmem;

	// Use CMOS calls to measure available base & extended memory.
	// (CMOS calls return results in kilobytes.)
	basemem = nvram_read(NVRAM_BASELO);
	extmem = nvram_read(NVRAM_EXTLO);
	ext16mem = nvram_read(NVRAM_EXT16LO) * 64;

	// Calculate the number of physical pages available in both base
	// and extended memory.
	if (ext16mem)
		totalmem = 16 * 1024 + ext16mem;
	else if (extmem)
		totalmem = 1 * 1024 + extmem;
	else
		totalmem = basemem;

	npages = totalmem / (PGSIZE / 1024);
	npages_basemem = basemem / (PGSIZE / 1024);

	cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
		totalmem, basemem, totalmem - basemem);
}
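
// Worked example (for a hypothetical guest with 128MiB of RAM): the
// NVRAM_EXT16LO register reports (128*1024 - 16*1024)/64 = 1792 units
// of 64K above the 16MB mark, so totalmem = 16*1024 + 1792*64 =
// 131072K and npages = 131072 / (PGSIZE/1024) = 32768 4KB pages.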

// --------------------------------------------------------------
// Set up memory mappings above UTOP.
// --------------------------------------------------------------

static void mem_init_mp(void);
static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
static void check_page_free_list(bool only_low_memory);
static void check_page_alloc(void);
static void check_kern_pgdir(void);
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
static void check_page(void);
static void check_page_installed_pgdir(void);

// This simple physical memory allocator is used only while JOS is setting
// up its virtual memory system.  page_alloc() is the real allocator.
//
// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
// bytes.  Doesn't initialize the memory.  Returns a kernel virtual address.
//
// If n==0, returns the address of the next free page without allocating
// anything.
//
// If we're out of memory, boot_alloc should panic.
// This function may ONLY be used during initialization,
// before the page_free_list list has been set up.
static void *
boot_alloc(uint32_t n)
{
	static char *nextfree;	// virtual address of next byte of free memory
	char *result;

	// Initialize nextfree if this is the first time.
	// 'end' is a magic symbol automatically generated by the linker,
	// which points to the end of the kernel's bss segment:
	// the first virtual address that the linker did *not* assign
	// to any kernel code or global variables.
	if (!nextfree) {
		extern char end[];
		nextfree = ROUNDUP((char *) end, PGSIZE);
	}

	// Allocate a chunk large enough to hold 'n' bytes, then update
	// nextfree.  Make sure nextfree is kept aligned
	// to a multiple of PGSIZE.
	//
	// LAB 2: Your code here.
	result = nextfree;
	nextfree = ROUNDUP(nextfree + n, PGSIZE);

	// Panic rather than hand out memory we don't have.  (npages is
	// valid here because mem_init calls i386_detect_memory first.)
	if ((uint32_t) nextfree - KERNBASE > npages * PGSIZE)
		panic("boot_alloc: out of memory");

	return result;
}

// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of its root
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
	uint32_t cr0;
	size_t n;

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
	// following line.)

	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;
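
	// Because kern_pgdir doubles as the page table for the UVPT slot,
	// the 4MB window [UVPT, UVPT+PTSIZE) exposes every page table
	// page, read-only.  For example (a sketch using the 'uvpt' alias
	// that inc/memlayout.h declares for user-mode code),
	// uvpt[PGNUM(va)] reads va's PTE without an explicit walk.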

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct PageInfo in this
	// array.  'npages' is the number of physical pages in memory.  Use memset
	// to initialize all fields of each struct PageInfo to 0.
	// Your code goes here:
	size_t pages_size = sizeof(struct PageInfo) * npages;
	pages = boot_alloc(pages_size);
	memset(pages, 0, pages_size);

	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.
	size_t envs_size = sizeof(struct Env) * NENV;
	envs = boot_alloc(envs_size);
	memset(envs, 0, envs_size);

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages. Once we've done so, all further
	// memory management will go through the page_* functions. In
	// particular, we can now map memory using boot_map_region
	// or page_insert
	page_init();

	check_page_free_list(1);
	check_page_alloc();
	check_page();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// Permissions:
	//    - the new image at UPAGES -- kernel R, user R
	//      (ie. perm = PTE_U | PTE_P)
	//    - pages itself -- kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir,
			UPAGES, ROUNDUP(pages_size, PGSIZE),
			PADDR(pages), PTE_U);
	kern_pgdir[PDX(UPAGES)] |= PTE_U;
	kern_pgdir[PDX(UPAGES)] &= ~PTE_W;

	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS
	// (ie. perm = PTE_U | PTE_P).
	// Permissions:
	//    - the new image at UENVS  -- kernel R, user R
	//    - envs itself -- kernel RW, user NONE
	// LAB 3: Your code here.
	boot_map_region(kern_pgdir,
			UENVS, ROUNDUP(envs_size, PGSIZE),
			PADDR(envs), PTE_U);
	kern_pgdir[PDX(UENVS)] |= PTE_U;
	kern_pgdir[PDX(UENVS)] &= ~PTE_W;

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir,
			KSTACKTOP-KSTKSIZE, KSTKSIZE,
			PADDR(bootstack), PTE_W);
	kern_pgdir[PDX(KSTACKTOP-KSTKSIZE)] |= PTE_W;
	kern_pgdir[PDX(KSTACKTOP-KSTKSIZE)] &= ~PTE_U;

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_region(kern_pgdir,
			KERNBASE, 0x100000000 - KERNBASE,
			0, PTE_W);
	kern_pgdir[PDX(KERNBASE)] |= PTE_W | PTE_P;
	kern_pgdir[PDX(KERNBASE)] &= ~PTE_U;

	// Initialize the SMP-related parts of the memory map
	mem_init_mp();

	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();

	// Switch from the minimal entry page directory to the full kern_pgdir
	// page table we just created.  Our instruction pointer should be
	// somewhere between KERNBASE and KERNBASE+4MB right now, which is
	// mapped the same way by both page tables.
	//
	// If the machine reboots at this point, you've probably set up your
	// kern_pgdir wrong.
	lcr3(PADDR(kern_pgdir));

	check_page_free_list(0);

	// entry.S set the really important flags in cr0 (including enabling
	// paging).  Here we configure the rest of the flags that we care about.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Some more checks, only possible after kern_pgdir is installed.
	check_page_installed_pgdir();
}

// Modify mappings in kern_pgdir to support SMP
//   - Map the per-CPU stacks in the region [KSTACKTOP-PTSIZE, KSTACKTOP)
//
static void
mem_init_mp(void)
{
	// Map per-CPU stacks starting at KSTACKTOP, for up to 'NCPU' CPUs.
	//
	// For CPU i, use the physical memory that 'percpu_kstacks[i]' refers
	// to as its kernel stack. CPU i's kernel stack grows down from virtual
	// address kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP), and is
	// divided into two pieces, just like the single stack you set up in
	// mem_init:
	//     * [kstacktop_i - KSTKSIZE, kstacktop_i)
	//          -- backed by physical memory
	//     * [kstacktop_i - (KSTKSIZE + KSTKGAP), kstacktop_i - KSTKSIZE)
	//          -- not backed; so if the kernel overflows its stack,
	//             it will fault rather than overwrite another CPU's stack.
	//             Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	//
	// LAB 4: Your code here:
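
	// A minimal sketch of what this exercise calls for, using the
	// percpu_kstacks array from kern/cpu.h (the same mappings that
	// check_kern_pgdir verifies); left commented out since the
	// exercise is still open:
	//
	//	int i;
	//	for (i = 0; i < NCPU; i++) {
	//		uintptr_t kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP);
	//		boot_map_region(kern_pgdir, kstacktop_i - KSTKSIZE,
	//				KSTKSIZE, PADDR(percpu_kstacks[i]), PTE_W);
	//	}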
}

// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct PageInfo' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

bool
is_reserved(size_t pagenum) {
	if(pagenum == 0) return true;
	// LAB 4: the page at MPENTRY_PADDR holds the AP bootstrap code;
	// check_page_free_list asserts it is never on the free list.
	if(pagenum == PGNUM(MPENTRY_PADDR)) return true;
	if(pagenum >= PGNUM(IOPHYSMEM) &&
	   pagenum < PGNUM(PADDR(boot_alloc(0)))) return true;

	return false;
}

//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// LAB 4:
	// Change your code to mark the physical page at MPENTRY_PADDR
	// as in use

	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
	//     is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free. Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this.
	// NB: DO NOT actually touch the physical memory corresponding to
	// free pages!
	size_t i;
	for (i = 0; i < npages; i++) {
		if(is_reserved(i)) {
			pages[i].pp_ref = 1;
		} else {
			pages[i].pp_ref = 0;
			pages[i].pp_link = page_free_list;
			page_free_list = &pages[i];
		}
	}
}

//
// Allocates a physical page.  If (alloc_flags & ALLOC_ZERO), fills the entire
// returned physical page with '\0' bytes.  Does NOT increment the reference
// count of the page - the caller must do so if necessary (either explicitly
// or via page_insert).
//
// Be sure to set the pp_link field of the allocated page to NULL so
// page_free can check for double-free bugs.
//
// Returns NULL if out of free memory.
//
// Hint: use page2kva and memset
struct PageInfo *
page_alloc(int alloc_flags)
{
	struct PageInfo* to_return = page_free_list;
	if(to_return == NULL) return NULL;

	page_free_list = to_return->pp_link;
	to_return->pp_link = NULL;

	if(alloc_flags & ALLOC_ZERO) {
		memset(page2kva(to_return), 0, PGSIZE);
	}

	return to_return;
}

//
// Return a page to the free list.
// (This function should only be called when pp->pp_ref reaches 0.)
//
void
page_free(struct PageInfo *pp)
{
	if(pp->pp_ref || pp->pp_link != NULL)
		panic("page_free: nonzero pp_ref or double free (pp_link set)");
	pp->pp_link = page_free_list;
	page_free_list = pp;
}

//
// Decrement the reference count on a page,
// freeing it if there are no more refs.
//
void
page_decref(struct PageInfo* pp)
{
	if (--pp->pp_ref == 0)
		page_free(pp);
}

// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//	the page is cleared,
//	and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a PageInfo * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	pte_t* base_table = NULL;

	if(pgdir[PDX(va)] & PTE_P) {
		// We have a valid page table; awesome!
		base_table = KADDR(PTE_ADDR(pgdir[PDX(va)]));
	} else {
		if(!create) return NULL;

		struct PageInfo* page = page_alloc(ALLOC_ZERO);
		if(!page) return NULL;

		page->pp_ref++;
		physaddr_t ppa = page2pa(page);
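		// Per Hint 2 above, it's safe to be permissive at the
		// directory level: each PTE still gates the actual access,
		// so one page table can serve mappings of any permission.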
		pgdir[PDX(va)] = ppa | PTE_P | PTE_U | PTE_W;
		base_table = KADDR(ppa);
	}

	return &base_table[PTX(va)];
}

//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE, and
// va and pa are both page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	size_t count = size / PGSIZE;

	while(count--) {
		pte_t* pte = pgdir_walk(pgdir, (void*) va, true);
		if(!pte)
			panic("boot_map_region: out of memory");
		*pte = pa | perm | PTE_P;

		va += PGSIZE;
		pa += PGSIZE;
	}
}

//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t* pte;
	if(!(pte = pgdir_walk(pgdir, va, true))) return -E_NO_MEM;
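
	// Bump the ref count *before* the possible page_remove: if pp is
	// already mapped at va, this keeps pp_ref from dropping to 0 and
	// the page from being freed out from under us -- the one-code-path
	// trick the corner-case hint above alludes to.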
	pp->pp_ref++;
	if(*pte & PTE_P) page_remove(pgdir, va);
	*pte = page2pa(pp) | PTE_P | perm;
	tlb_invalidate(pgdir, va);

	return 0;
}

//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t* pte;
	if(!(pte = pgdir_walk(pgdir, va, false))) {
		if(pte_store) *pte_store = NULL;
		return NULL;
	}

	if(pte_store) *pte_store = pte;
	// A page table exists but no page is mapped here.
	if(!(*pte & PTE_P)) return NULL;
	return pa2page(PTE_ADDR(*pte));
}

//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The page table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the page table.
//
// Hint: The TA solution is implemented using page_lookup,
// 	tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, void *va)
{
	pte_t* pte;
	struct PageInfo* pp;

	pp = page_lookup(pgdir, va, &pte);
	if(!pp) return;		// no page table, or no page mapped here

	page_decref(pp);
	*pte = 0;
	tlb_invalidate(pgdir, va);
}

//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	if (!curenv || curenv->env_pgdir == pgdir)
		invlpg(va);
}

//
// Reserve size bytes in the MMIO region and map [pa,pa+size) at this
// location.  Return the base of the reserved region.  size does *not*
// have to be multiple of PGSIZE.
//
void *
mmio_map_region(physaddr_t pa, size_t size)
{
	// Where to start the next region.  Initially, this is the
	// beginning of the MMIO region.  Because this is static, its
	// value will be preserved between calls to mmio_map_region
	// (just like nextfree in boot_alloc).
	static uintptr_t base = MMIOBASE;

	// Reserve size bytes of virtual memory starting at base and
	// map physical pages [pa,pa+size) to virtual addresses
	// [base,base+size).  Since this is device memory and not
	// regular DRAM, you'll have to tell the CPU that it isn't
	// safe to cache access to this memory.  Luckily, the page
	// tables provide bits for this purpose; simply create the
	// mapping with PTE_PCD|PTE_PWT (cache-disable and
	// write-through) in addition to PTE_W.  (If you're interested
	// in more details on this, see section 10.5 of IA32 volume
	// 3A.)
	//
	// Be sure to round size up to a multiple of PGSIZE and to
	// handle if this reservation would overflow MMIOLIM (it's
	// okay to simply panic if this happens).
	//
	// Hint: The staff solution uses boot_map_region.
	//
	// Your code here:
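	// A minimal sketch of what the comment above asks for, left
	// commented out since this LAB 4 exercise is still open:
	//
	//	size_t sz = ROUNDUP(size, PGSIZE);
	//	if (base + sz > MMIOLIM || base + sz < base)
	//		panic("mmio_map_region: reservation overflows MMIOLIM");
	//	boot_map_region(kern_pgdir, base, sz, pa,
	//			PTE_PCD | PTE_PWT | PTE_W);
	//	void *ret = (void *) base;
	//	base += sz;
	//	return ret;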
	panic("mmio_map_region not implemented");
}

static uintptr_t user_mem_check_addr;

//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
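// (For instance, va = PGSIZE-1 with len = PGSIZE+2 touches bytes in
// three pages even though len/PGSIZE == 1 -- the 'len/PGSIZE + 2'
// case above.)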
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	// LAB 3: Your code here.
	uintptr_t to_report = (uintptr_t) va;

	// Test (1): the whole range must lie below ULIM, no matter what
	// the page tables say.
	if ((uintptr_t) va >= ULIM || len > ULIM - (uintptr_t) va) {
		user_mem_check_addr = (uintptr_t) va >= ULIM ? (uintptr_t) va : ULIM;
		return -E_FAULT;
	}

	// Test (2): walk every page the range touches.  Count pages from
	// the rounded bounds, not from len alone, so an unaligned va
	// still gets its final page checked.
	const char* bottom = ROUNDDOWN((char *) va, PGSIZE);
	uintptr_t top = ROUNDUP((uintptr_t) va + len, PGSIZE);
	size_t aligned_count = (top - (uintptr_t) bottom) / PGSIZE;
	pde_t mask = PTE_P | PTE_U | PTE_W;
	pde_t perms = mask;

#define VALID ((perms & (perm | PTE_P)) == (perm | PTE_P))
#define DO_CHECK if(!VALID) { user_mem_check_addr = to_report; return -E_FAULT; }

	while(aligned_count--) {
		perms &= env->env_pgdir[PDX(bottom)] & mask;
		DO_CHECK;
		perms &= (*pgdir_walk(env->env_pgdir, bottom, 0)) & mask;
		DO_CHECK;
		bottom += PGSIZE;
		to_report = (uintptr_t) bottom;
	}

	return 0;
}

//
// Checks that environment 'env' is allowed to access the range
// of memory [va, va+len) with permissions 'perm | PTE_U | PTE_P'.
// If it can, then the function simply returns.
// If it cannot, 'env' is destroyed and, if env is the current
// environment, this function will not return.
//
void
user_mem_assert(struct Env *env, const void *va, size_t len, int perm)
{
	if (user_mem_check(env, va, len, perm | PTE_U) < 0) {
		cprintf("[%08x] user_mem_check assertion failure for "
			"va %08x\n", env->env_id, user_mem_check_addr);
		env_destroy(env);	// may not return
	}
}


// --------------------------------------------------------------
// Checking functions.
// --------------------------------------------------------------

//
// Check that the pages on the page_free_list are reasonable.
//
static void
check_page_free_list(bool only_low_memory)
{
	struct PageInfo *pp;
	unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
	int nfree_basemem = 0, nfree_extmem = 0;
	char *first_free_page;

	if (!page_free_list)
		panic("'page_free_list' is a null pointer!");

	if (only_low_memory) {
		// Move pages with lower addresses first in the free
		// list, since entry_pgdir does not map all pages.
		struct PageInfo *pp1, *pp2;
		struct PageInfo **tp[2] = { &pp1, &pp2 };
		for (pp = page_free_list; pp; pp = pp->pp_link) {
			int pagetype = PDX(page2pa(pp)) >= pdx_limit;
			*tp[pagetype] = pp;
			tp[pagetype] = &pp->pp_link;
		}
		*tp[1] = 0;
		*tp[0] = pp2;
		page_free_list = pp1;
	}

	// if there's a page that shouldn't be on the free list,
	// try to make sure it eventually causes trouble.
	for (pp = page_free_list; pp; pp = pp->pp_link)
		if (PDX(page2pa(pp)) < pdx_limit)
			memset(page2kva(pp), 0x97, 128);

	first_free_page = (char *) boot_alloc(0);
	for (pp = page_free_list; pp; pp = pp->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp >= pages);
		assert(pp < pages + npages);
		assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

		// check a few pages that shouldn't be on the free list
		assert(page2pa(pp) != 0);
		assert(page2pa(pp) != IOPHYSMEM);
		assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp) != EXTPHYSMEM);
		assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);
		// (new test for lab 4)
		assert(page2pa(pp) != MPENTRY_PADDR);

		if (page2pa(pp) < EXTPHYSMEM)
			++nfree_basemem;
		else
			++nfree_extmem;
	}

	assert(nfree_basemem > 0);
	assert(nfree_extmem > 0);

	cprintf("check_page_free_list() succeeded!\n");
}

//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	int nfree;
	struct PageInfo *fl;
	char *c;
	int i;

	if (!pages)
		panic("'pages' is a null pointer!");

	// check number of free pages
	for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
		++nfree;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npages*PGSIZE);
	assert(page2pa(pp1) < npages*PGSIZE);
	assert(page2pa(pp2) < npages*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// free and re-allocate?
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(!page_alloc(0));

	// test flags
	memset(page2kva(pp0), 1, PGSIZE);
	page_free(pp0);
	assert((pp = page_alloc(ALLOC_ZERO)));
	assert(pp && pp0 == pp);
	c = page2kva(pp);
	for (i = 0; i < PGSIZE; i++)
		assert(c[i] == 0);

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// number of free pages should be the same
	for (pp = page_free_list; pp; pp = pp->pp_link)
		--nfree;
	assert(nfree == 0);

	cprintf("check_page_alloc() succeeded!\n");
}

//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by mem_init()).
//
// This function doesn't test every corner case,
// but it is a pretty good sanity check.
//

static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array
	n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check envs array (new test for lab 3)
	n = ROUNDUP(NENV*sizeof(struct Env), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UENVS + i) == PADDR(envs) + i);

	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	// (updated in lab 4 to check per-CPU kernel stacks)
	for (n = 0; n < NCPU; n++) {
		uint32_t base = KSTACKTOP - (KSTKSIZE + KSTKGAP) * (n + 1);
		for (i = 0; i < KSTKSIZE; i += PGSIZE)
			assert(check_va2pa(pgdir, base + KSTKGAP + i)
				== PADDR(percpu_kstacks[n]) + i);
		for (i = 0; i < KSTKGAP; i += PGSIZE)
			assert(check_va2pa(pgdir, base + i) == ~0);
	}

	// check PDE permissions
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
		case PDX(UENVS):
		case PDX(MMIOBASE):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");
}

// This function returns the physical address of the page containing 'va',
// defined by the page directory 'pgdir'.  The hardware normally performs
// this functionality for us!  We define our own version to help check
// the check_kern_pgdir() function; it shouldn't be used elsewhere.

static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}


// check page_insert, page_remove, &c
static void
check_page(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	void *va;
	uintptr_t mm1, mm2;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// test re-inserting pp1 at PGSIZE
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	assert(mm1 >= MMIOBASE && mm1 + 8192 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8192 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8192 <= mm2);
	// check page mappings
	assert(check_va2pa(kern_pgdir, mm1) == 0);
	assert(check_va2pa(kern_pgdir, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(kern_pgdir, mm2) == 0);
	assert(check_va2pa(kern_pgdir, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pgdir_walk(kern_pgdir, (void*) mm1, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm1 + PGSIZE, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}

// check page_insert, page_remove, &c, with an installed kern_pgdir
static void
check_page_installed_pgdir(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	uintptr_t va;
	int i;

	// check that we can read and write installed pages
	pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	page_free(pp0);
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
	assert(pp1->pp_ref == 1);
	assert(*(uint32_t *)PGSIZE == 0x01010101U);
	page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
	assert(*(uint32_t *)PGSIZE == 0x02020202U);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	*(uint32_t *)PGSIZE = 0x03030303U;
	assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(pp2->pp_ref == 0);

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// free the pages we took
	page_free(pp0);

	cprintf("check_page_installed_pgdir() succeeded!\n");
}