Initial implementation of lab 2.
parent d7a96eb60f
commit ca51596893

 kern/pmap.c | 117

@@ -102,8 +102,10 @@ boot_alloc(uint32_t n)
 	// to a multiple of PGSIZE.
 	//
 	// LAB 2: Your code here.
+	result = nextfree;
+	nextfree = ROUNDUP(nextfree + n, PGSIZE);
 
-	return NULL;
+	return result;
 }
 
 // Set up a two-level page table:
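
Annotation (not part of the commit): boot_alloc is a bump allocator -- it returns the current value of nextfree and advances it by the request size rounded up to a whole page. The stand-alone sketch below models that behaviour in user space; the arena size and the 4096-byte PGSIZE are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

#define PGSIZE 4096u
/* Round 'a' up to the nearest multiple of 'n' (n must be a power of two). */
#define ROUNDUP(a, n) (((a) + (n) - 1) & ~(uintptr_t)((n) - 1))

static uint8_t arena[64 * PGSIZE];   /* assumed backing store for the model */
static uintptr_t nextfree;

/* Bump allocator: return the current free pointer, then advance it past
 * n bytes rounded up to a page boundary, the same pattern as boot_alloc. */
static void *
bump_alloc(uint32_t n)
{
	uintptr_t result = nextfree;
	nextfree = ROUNDUP(nextfree + n, PGSIZE);
	return (void *) result;
}

int
main(void)
{
	nextfree = ROUNDUP((uintptr_t) arena, PGSIZE);   /* start page-aligned */
	void *a = bump_alloc(10);       /* 10 bytes still cost a full page */
	void *b = bump_alloc(PGSIZE);   /* exactly one page                */
	printf("a=%p b=%p gap=%lu\n", a, b,
	       (unsigned long) ((uintptr_t) b - (uintptr_t) a));   /* gap == 4096 */
	return 0;
}

Because every request is rounded up to PGSIZE, even a 10-byte allocation consumes a full page; boot_alloc trades that slack for the guarantee that successive allocations stay page-aligned.
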
@@ -124,9 +126,6 @@ mem_init(void)
 	// Find out how much memory the machine has (npages & npages_basemem).
 	i386_detect_memory();
 
-	// Remove this line when you're ready to test this function.
-	panic("mem_init: This function is not finished\n");
-
 	//////////////////////////////////////////////////////////////////////
 	// create initial page directory.
 	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
@@ -148,7 +147,9 @@ mem_init(void)
 	// array.  'npages' is the number of physical pages in memory.  Use memset
 	// to initialize all fields of each struct PageInfo to 0.
 	// Your code goes here:
-
+	size_t pages_size = sizeof(struct PageInfo) * npages;
+	pages = boot_alloc(pages_size);
+	memset(pages, 0, pages_size);
 
 	//////////////////////////////////////////////////////////////////////
 	// Now that we've allocated the initial kernel data structures, we set
@@ -172,6 +173,11 @@ mem_init(void)
 	//      (ie. perm = PTE_U | PTE_P)
 	//    - pages itself -- kernel RW, user NONE
 	// Your code goes here:
+	boot_map_region(kern_pgdir,
+			UPAGES, ROUNDUP(pages_size, PGSIZE),
+			PADDR(pages), PTE_W);
+	kern_pgdir[PDX(UPAGES)] |= PTE_U | PTE_P;
+	kern_pgdir[PDX(UPAGES)] &= ~PTE_W;
 
 	//////////////////////////////////////////////////////////////////////
 	// Use the physical memory that 'bootstack' refers to as the kernel
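
Annotation (not part of the commit): the UPAGES mapping is built with kernel-writable table entries, and the directory entry is then adjusted toward a user-readable view. On x86, user-mode access needs PTE_U set in both the page-directory entry and the page-table entry, and user writes additionally need PTE_W in both, so the effective permission is the intersection of the two levels. The sketch below only illustrates how the low bits of a 32-bit entry compose; the flag values match the x86 definitions JOS uses in mmu.h, and the frame address is invented.

#include <stdint.h>
#include <stdio.h>

/* Low flag bits of a 32-bit x86 page-directory / page-table entry. */
#define PTE_P 0x001u  /* present         */
#define PTE_W 0x002u  /* writable        */
#define PTE_U 0x004u  /* user-accessible */
#define PTE_ADDR(pte) ((uint32_t)(pte) & ~0xFFFu)   /* physical frame address */

int
main(void)
{
	uint32_t pa = 0x00123000u;                /* invented page-aligned frame */

	/* Kernel read/write, user none: what boot_map_region(..., PTE_W) builds. */
	uint32_t kernel_rw = pa | PTE_W | PTE_P;

	/* A user-readable, non-writable entry for the same frame: set PTE_U and
	 * clear PTE_W, analogous to the PDX(UPAGES) adjustments in the commit. */
	uint32_t user_ro = (kernel_rw | PTE_U) & ~PTE_W;

	printf("kernel_rw=%08x user_ro=%08x frame=%08x\n",
	       (unsigned) kernel_rw, (unsigned) user_ro, (unsigned) PTE_ADDR(user_ro));
	return 0;
}
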
@@ -184,6 +190,11 @@ mem_init(void)
 	//       overwrite memory.  Known as a "guard page".
 	//     Permissions: kernel RW, user NONE
 	// Your code goes here:
+	boot_map_region(kern_pgdir,
+			KSTACKTOP-KSTKSIZE, KSTKSIZE,
+			PADDR(bootstack), PTE_W);
+	kern_pgdir[PDX(KSTACKTOP-KSTKSIZE)] |= PTE_W | PTE_P;
+	kern_pgdir[PDX(KSTACKTOP-KSTKSIZE)] &= ~PTE_U;
 
 	//////////////////////////////////////////////////////////////////////
 	// Map all of physical memory at KERNBASE.
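
Annotation (not part of the commit): only [KSTACKTOP-KSTKSIZE, KSTACKTOP) is backed by bootstack; the guard region below it stays unmapped so a stack overflow faults instead of silently overwriting memory. The constants below follow the values JOS's memlayout.h defines; treat them as assumptions of this sketch.

#include <stdint.h>
#include <stdio.h>

#define PGSIZE    4096u
/* Assumed values, following JOS's memlayout.h. */
#define KSTACKTOP 0xf0000000u
#define KSTKSIZE  (8 * PGSIZE)   /* backed portion of the kernel stack */
#define KSTKGAP   (8 * PGSIZE)   /* unmapped guard region below it     */

int
main(void)
{
	uint32_t backed_lo = KSTACKTOP - KSTKSIZE;            /* mapped to PADDR(bootstack) */
	uint32_t guard_lo  = KSTACKTOP - KSTKSIZE - KSTKGAP;  /* deliberately left unmapped */

	printf("stack: [%08x, %08x) -> backed by bootstack\n",
	       (unsigned) backed_lo, (unsigned) KSTACKTOP);
	printf("guard: [%08x, %08x) -> no mapping; overflow faults here\n",
	       (unsigned) guard_lo, (unsigned) backed_lo);
	return 0;
}
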
@@ -193,6 +204,12 @@ mem_init(void)
 	// we just set up the mapping anyway.
 	// Permissions: kernel RW, user NONE
 	// Your code goes here:
+	boot_map_region(kern_pgdir,
+			KERNBASE, 0x100000000 - KERNBASE,
+			0, PTE_W);
+	kern_pgdir[PDX(KERNBASE)] |= PTE_W | PTE_P;
+	kern_pgdir[PDX(KERNBASE)] &= ~PTE_U;
+
 
 	// Check that the initial page directory has been set up correctly.
 	check_kern_pgdir();
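
Annotation (not part of the commit): this last mapping covers every virtual address from KERNBASE to the top of the 32-bit address space, backed by physical addresses starting at 0. Since 0x100000000 does not fit in 32 bits, the length is easiest to sanity-check in 64-bit arithmetic, which this small sketch does with the KERNBASE value assumed from memlayout.h.

#include <stdint.h>
#include <stdio.h>

#define KERNBASE 0xf0000000u   /* assumed, as in JOS's memlayout.h */

int
main(void)
{
	/* [KERNBASE, 2^32) maps to physical [0, 2^32 - KERNBASE); 0x100000000
	 * needs 33 bits, so compute the length as a 64-bit quantity first. */
	uint64_t top  = 0x100000000ull;
	uint64_t size = top - KERNBASE;           /* 0x10000000 bytes = 256 MB */

	printf("size = 0x%llx bytes, %llu MB, %llu pages of 4 KB\n",
	       (unsigned long long) size,
	       (unsigned long long) (size >> 20),
	       (unsigned long long) (size / 4096));
	return 0;
}
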
@@ -224,6 +241,14 @@ mem_init(void)
 // The 'pages' array has one 'struct PageInfo' entry per physical page.
 // Pages are reference counted, and free pages are kept on a linked list.
 // --------------------------------------------------------------
+bool
+is_reserved(size_t pagenum) {
+	if(pagenum == 0) return true;
+	if(pagenum >= PGNUM(IOPHYSMEM) &&
+	   pagenum < PGNUM(PADDR(boot_alloc(0)))) return true;
+
+	return false;
+}
 
 //
 // Initialize page structure and memory free list.
@@ -253,11 +278,15 @@ page_init(void)
 	// free pages!
 	size_t i;
 	for (i = 0; i < npages; i++) {
+		if(is_reserved(i)) {
+			pages[i].pp_ref = 1;
+		} else {
 		pages[i].pp_ref = 0;
 		pages[i].pp_link = page_free_list;
 		page_free_list = &pages[i];
+		}
 	}
 }
 
 //
 // Allocates a physical page.  If (alloc_flags & ALLOC_ZERO), fills the entire
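
Annotation (not part of the commit): page_init walks every physical page exactly once, pins the reserved ones by giving them a nonzero pp_ref, and threads the rest onto page_free_list. In the commit the reserved set is page 0 plus everything from IOPHYSMEM up to the first address boot_alloc has not handed out yet; the toy model below uses a made-up reserved range just to show the list construction.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

struct page {
	struct page *pp_link;
	unsigned pp_ref;
};

static struct page pages[NPAGES];
static struct page *page_free_list;

static bool
reserved(size_t pagenum)
{
	if (pagenum == 0)                   /* real-mode IDT / BIOS area */
		return true;
	if (pagenum >= 10 && pagenum < 13)  /* stand-in for the IO hole + kernel */
		return true;
	return false;
}

int
main(void)
{
	for (size_t i = 0; i < NPAGES; i++) {
		if (reserved(i)) {
			pages[i].pp_ref = 1;            /* never handed out */
		} else {
			pages[i].pp_ref = 0;
			pages[i].pp_link = page_free_list;
			page_free_list = &pages[i];     /* push onto the free list */
		}
	}

	size_t nfree = 0;
	for (struct page *p = page_free_list; p; p = p->pp_link)
		nfree++;
	printf("%zu of %d pages on the free list\n", nfree, NPAGES);
	return 0;
}
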
@@ -274,8 +303,17 @@ page_init(void)
 struct PageInfo *
 page_alloc(int alloc_flags)
 {
-	// Fill this function in
-	return 0;
+	struct PageInfo* to_return = page_free_list;
+	if(to_return == 0) return NULL;
+
+	page_free_list = to_return->pp_link;
+	to_return->pp_link = NULL;
+
+	if(alloc_flags & ALLOC_ZERO) {
+		memset(page2kva(to_return), 0, PGSIZE);
+	}
+
+	return to_return;
 }
 
 //
@@ -285,9 +323,10 @@ page_alloc(int alloc_flags)
 void
 page_free(struct PageInfo *pp)
 {
-	// Fill this function in
-	// Hint: You may want to panic if pp->pp_ref is nonzero or
-	// pp->pp_link is not NULL.
+	if(pp->pp_ref || pp->pp_link != NULL)
+		panic("Freeing page with nonzero reference count!");
+	pp->pp_link = page_free_list;
+	page_free_list = pp;
 }
 
 //
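
Annotation (not part of the commit): page_alloc pops the head of the free list and clears pp_link so that a later double free is detectable, and page_free refuses pages that are still referenced or still linked. A compact user-space model of that pop/push discipline, with invented types and sizes:

#include <assert.h>
#include <stddef.h>

struct page {
	struct page *pp_link;
	unsigned pp_ref;
};

static struct page pages[8];
static struct page *page_free_list;

static struct page *
alloc_page(void)
{
	struct page *pp = page_free_list;
	if (!pp)
		return NULL;                 /* out of memory */
	page_free_list = pp->pp_link;
	pp->pp_link = NULL;              /* allocated pages are never linked */
	return pp;
}

static void
free_page(struct page *pp)
{
	/* Same guard as the panic in page_free: refuse pages that are still
	 * referenced or that still appear to sit on the free list. */
	assert(pp->pp_ref == 0 && pp->pp_link == NULL);
	pp->pp_link = page_free_list;
	page_free_list = pp;
}

int
main(void)
{
	for (size_t i = 0; i < 8; i++)
		free_page(&pages[i]);        /* seed the free list */

	struct page *a = alloc_page();
	struct page *b = alloc_page();
	assert(a && b && a != b);
	free_page(b);
	free_page(a);
	return 0;
}
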
@@ -326,8 +365,25 @@ page_decref(struct PageInfo* pp)
 pte_t *
 pgdir_walk(pde_t *pgdir, const void *va, int create)
 {
+	pte_t* base_table = NULL;
+
+	if(pgdir[PDX(va)] & PTE_P) {
+		// We have a valid page table; awesome!
+		base_table = KADDR(PTE_ADDR(pgdir[PDX(va)]));
+	} else {
+		if(!create) return NULL;
+
+		struct PageInfo* page = page_alloc(ALLOC_ZERO);
+		if(!page) return NULL;
+
+		page->pp_ref++;
+		physaddr_t ppa = page2pa(page);
+		pgdir[PDX(va)] = ppa | PTE_P | PTE_U | PTE_W;
+		base_table = KADDR(ppa);
+	}
+
 	// Fill this function in
-	return NULL;
+	return &base_table[PTX(va)];
 }
 
 //
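
Annotation (not part of the commit): pgdir_walk relies on the 10/10/12 split of a 32-bit linear address -- PDX selects the page-directory entry, PTX the entry inside that page table, and the low 12 bits are the offset within the page. The macros below mirror the x86 layout used by JOS's PDX/PTX; the example address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* 10/10/12 split of a 32-bit linear address, as in JOS's PDX/PTX macros. */
#define PDX(la)   (((uint32_t)(la) >> 22) & 0x3FF)
#define PTX(la)   (((uint32_t)(la) >> 12) & 0x3FF)
#define PGOFF(la) ((uint32_t)(la) & 0xFFF)

int
main(void)
{
	uint32_t va = 0xf011a2f4u;   /* arbitrary example address */
	printf("va=%08x  PDX=0x%03x  PTX=0x%03x  offset=0x%03x\n",
	       (unsigned) va, (unsigned) PDX(va), (unsigned) PTX(va),
	       (unsigned) PGOFF(va));
	return 0;
}
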
@@ -344,7 +400,16 @@ pgdir_walk(pde_t *pgdir, const void *va, int create)
 static void
 boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
 {
-	// Fill this function in
+	size_t count = size / PGSIZE;
+	uintptr_t start_va = va;
+	physaddr_t start_pa = pa;
+	while(count-- && start_va <= va && start_pa <= pa) {
+		pte_t* pte = pgdir_walk(pgdir, (void*) va, true);
+		*pte = pa | perm | PTE_P;
+
+		va += PGSIZE;
+		pa += PGSIZE;
+	}
 }
 
 //
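
Annotation (not part of the commit): boot_map_region maps size/PGSIZE pages by advancing the virtual and physical cursors in lockstep, one PGSIZE step per iteration, and stamping PTE_P plus the requested permissions into each entry. The sketch below shows the same stepping pattern against a toy single-level table; the table size and the mapped region are invented.

#include <stdint.h>
#include <stdio.h>

#define PGSIZE 4096u
#define PTE_P  0x001u
#define PTE_W  0x002u

/* Toy single-level "page table" indexed by virtual page number. */
#define NVPAGES 32
static uint32_t toy_pt[NVPAGES];

static void
map_region(uint32_t va, uint32_t size, uint32_t pa, uint32_t perm)
{
	uint32_t count = size / PGSIZE;          /* size is assumed page-aligned */
	while (count--) {
		toy_pt[va / PGSIZE] = pa | perm | PTE_P;
		va += PGSIZE;                    /* virtual and physical cursors */
		pa += PGSIZE;                    /* advance in lockstep          */
	}
}

int
main(void)
{
	map_region(4 * PGSIZE, 3 * PGSIZE, 0x20000u, PTE_W);
	for (unsigned i = 0; i < NVPAGES; i++)
		if (toy_pt[i])
			printf("vpage %2u -> %08x\n", i, (unsigned) toy_pt[i]);
	return 0;
}
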
@@ -375,7 +440,14 @@ boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm
 int
 page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
 {
-	// Fill this function in
+	pte_t* pte;
+	if(!(pte = pgdir_walk(pgdir, va, true))) return -E_NO_MEM;
+
+	pp->pp_ref++;
+	if(*pte & PTE_P) page_remove(pgdir, va);
+	*pte = page2pa(pp) | PTE_P | perm;
+	tlb_invalidate(pgdir, va);
+
 	return 0;
 }
 
@@ -393,10 +465,17 @@ page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
 struct PageInfo *
 page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
 {
-	// Fill this function in
+	pte_t* pte;
+	if(!(pte = pgdir_walk(pgdir, va, false))) {
+		if(pte_store) *pte_store = NULL;
 	return NULL;
+	}
+
+	struct PageInfo* pp = pa2page(PTE_ADDR(*pte));
+	if(pte_store) *pte_store = pte;
+	return pp;
 }
 
 //
 // Unmaps the physical page at virtual address 'va'.
 // If there is no physical page at that address, silently does nothing.
@@ -415,7 +494,15 @@ page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
 void
 page_remove(pde_t *pgdir, void *va)
 {
-	// Fill this function in
+	pte_t* pte;
+	struct PageInfo* pp;
+
+	pp = page_lookup(pgdir, va, &pte);
+	if(!(*pte & PTE_P)) return;
+
+	if(!(--(pp->pp_ref))) page_free(pp);
+	*pte = 0;
+	tlb_invalidate(pgdir, va);
 }
 
 //
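
Annotation (not part of the commit): page_insert takes the new reference (pp_ref++) before tearing down whatever was mapped at va, and page_remove drops a reference and frees the page when the count reaches zero. Incrementing first matters in the corner case where the same page is re-inserted at the same address. The toy model below exercises exactly that ordering for a single mapping slot; the types are invented.

#include <assert.h>
#include <stddef.h>

struct page {
	struct page *pp_link;
	unsigned pp_ref;
};

static struct page *slot;            /* stands in for one PTE            */
static struct page *page_free_list;  /* pages land here when ref hits 0  */

static void
slot_remove(void)
{
	if (!slot)
		return;
	if (--slot->pp_ref == 0) {
		slot->pp_link = page_free_list;
		page_free_list = slot;
	}
	slot = NULL;
}

static void
slot_insert(struct page *pp)
{
	pp->pp_ref++;        /* take the new reference first...           */
	slot_remove();       /* ...then drop whatever was mapped before   */
	slot = pp;
}

int
main(void)
{
	struct page pg = { NULL, 0 };

	slot_insert(&pg);
	slot_insert(&pg);    /* re-insert same page: must not end up freed */
	assert(pg.pp_ref == 1 && page_free_list == NULL);

	slot_remove();
	assert(pg.pp_ref == 0 && page_free_list == &pg);
	return 0;
}

Compiled and run, both asserts hold: the re-inserted page survives with a single reference, and only the final remove returns it to the free list.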