vm_offset_t virtual_start;
vm_offset_t virtual_end;

/* Top of physical memory. */
vm_offset_t physical_top;

/* We maintain a <PMAP, VA> table, indexed by physical page.
   We use this table to change protection for all mappings
   for a physical page. */
struct pm_ent
{
  struct pm_ent *pme_next;	/* Next mapping for page. */
  struct pmap *pme_pmap;	/* Physical map. */
  vm_offset_t pme_va;		/* Virtual address of mapping. */
};

static struct pm_ent **pm_table;
static struct kmem_cache *pm_ent_cache;
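
/* PM_TABLE has one chain head per physical page frame, indexed by
   vm_atop (PA).  It is allocated during bootstrap via
   pmap_steal_memory; the chain entries come from PM_ENT_CACHE, which
   is set up in pmap_init.  pmap_page_protect walks these chains.  */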
|
/* Convenience function for inserting a mapping into the
   physical page table. */
static inline void
pm_insert_entry (struct pmap *pmap, vm_offset_t pa, vm_offset_t va)
{
  struct pm_ent *pme;

  if (pa >= physical_top || !pm_table || !pm_ent_cache)
    return;

  pme = (struct pm_ent *) kmem_cache_alloc (pm_ent_cache);
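  /* The allocation is not checked for failure: PM_ENT_CACHE is
     created with CACHE_NEVER_EMPTY_FLAG (see pmap_init), so it is
     assumed to always return a valid entry.  */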
  pme->pme_pmap = pmap;
  pme->pme_va = va;

  pme->pme_next = pm_table [vm_atop (pa)];
  pm_table [vm_atop (pa)] = pme;
}

/* Convert from generic protection to PTE bits. */
static inline int convert_real_prot (vm_prot_t prot)
{
#define VM_PROT_RW (VM_PROT_READ | VM_PROT_WRITE)
    : prot == VM_PROT_EXEC ? (PTE_VALID|PTE_USER) : 0;
}

/* Flush local TLB cache. */
void pmap_flush_tlb (void)
{
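  /* Reloading CR3 with its current value invalidates all non-global
     TLB entries on x86, which is sufficient here.  */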
  set_cr3 ((void *) get_cr3 ());
}

/* Global variable for kernel physical map. */
struct pmap kernel_pmap;

/* ??? This function "allocates" a physical page. Returns 0 if we
   have reached the top of physical memory. */
static vm_offset_t
pmap_grab_page (void)
{
  int bank;

  /* First we check if we initialized the resident pages module. */
  if (vm_resident_pages_init == true)
    {
      pa = (vm_offset_t) vm_page_grab_physical ();

  /* PA 0 will never be among those given to VM so we can use it
     to indicate we couldn't grab any memory. */
  for (ps = vm_physmem, bank = 0, pa = 0; bank < vm_physmem_count; bank++, ps++)
    {
      /* ??? this check is not really needed. */
    }
  }

  trace_printf ("page grab: pa = %x", pa);
  return pa;
}

{
  extern struct multiboot_info *cpu_multiboot_ptr; /* from -start.S */
  vm_size_t memory_top, memory_start;
  struct vm_physseg *ps;
  int npages, i;

  /* Add lower physical memory. */
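  /* The multiboot mem_lower field is in kbytes, hence the * 1024.
     Loading starts at PA 0x1000 so that page 0 is never handed out
     and can serve as the failure sentinel used by pmap_grab_page.  */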
  vm_page_load (0x1000, vm_trunc_page (((vm_size_t) cpu_multiboot_ptr->mem_lower * 1024)),
                0x1000, vm_trunc_page (((vm_size_t) cpu_multiboot_ptr->mem_lower * 1024)));

  /* Add upper physical memory (excluding modules). */
  memory_top = ((vm_size_t) cpu_multiboot_ptr->mem_upper) * 1024;
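  /* Walk the bootstrap modules in order, loading only the gaps
     between MEMORY_START and the start of the next module, so module
     pages never end up on the free list.  */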
  for (i = 0, memory_start = vm_round_page (kernel_end); i < bootstrap_count; i++)
    {
      struct bootstrap *module = & bootstrap_modules [i];

      trace_printf ("module %d: pa %x-%x", i, module->offset,
                    module->offset + module->size);

      if (memory_start != vm_trunc_page (module->offset))
        {
          vm_page_load (memory_start, vm_trunc_page (module->offset),
                        memory_start, vm_trunc_page (module->offset));
          memory_start = vm_round_page (module->offset + module->size);
        }
    }

  vm_page_load (memory_start, vm_trunc_page (memory_top),
                memory_start, vm_trunc_page (memory_top));
  physical_top = memory_top;

  /* Set virtual address space limits. */
#if 0
  virtual_start = vm_round_page (kernel_end);
  virtual_end = VM_KERN_MAX_ADDRESS;
#else
  virtual_start = vm_round_page (memory_top);
  virtual_end = VM_KERN_MAX_ADDRESS;
#endif
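  /* The kernel arena starts above physical memory rather than right
     after the kernel image (the #if 0 variant), since everything up
     to MEMORY_TOP gets identity-mapped below.  */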
|
  /* Print some info to the console. */
  npages = vm_atop (((memory_top) - 0x1000) - (vm_round_page (kernel_end)
                                               - vm_trunc_page (kernel_start)));
  printf ("real memory %d kbytes\n", npages * VM_PAGE_SIZE / 1024);

  /* Allocate memory for kernel page directory. Kernel holds one reference. */
  kernel_pmap.pde = (void **) pmap_grab_page ();
  assert (kernel_pmap.pde);
  kernel_pmap.refcnt = 1;
  trace_printf ("kernel pde at %p", kernel_pmap.pde);

  /* Since we have not yet switched into paged mode we can clear the
     page through its physical address directly. */
  memset (kernel_pmap.pde, 0, VM_PAGE_SIZE);

  /* We map the whole kernel into the virtual address space.  The
     active variant identity-maps all of physical memory from 0x1000
     up to MEMORY_TOP; the #if 0 variant would map only the kernel
     image itself. */
#if 0
  PMAP_ENTER_RANGE (PMAP_KERNEL (), vm_trunc_page (kernel_start),
                    vm_round_page (kernel_end), vm_trunc_page (kernel_start),
                    VM_PROT_ALL, true);
#else
  PMAP_ENTER_RANGE (PMAP_KERNEL (), 0x1000,
                    vm_round_page (memory_top), 0x1000,
                    VM_PROT_ALL, true);
#endif

  trace_printf ("physical segments:");
  for (ps = vm_physmem, i = 0; i < vm_physmem_count; i++, ps++)
    {
      trace_printf (" %08x - %08x (avail %08x - %08x)", ps->start, ps->end,
                    ps->avail_start, ps->avail_end);
    }

  /* Load the page directory and enable paging, by setting the PG
     bit in the CR0 register. */
  set_cr3 ((void *) PMAP_KERNEL()->pde);
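  /* 0x80000000 is the PG bit (bit 31) of CR0.  The jump after the
     move serializes execution so no stale prefetched instructions run
     once paging is enabled.  */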
  __asm__ ("movl %0, %%cr0; jmp 0f; 0:" :: "r" (get_cr0() | 0x80000000));

  /* We allocate memory for the physical mapping table. */
  pm_table = (struct pm_ent **) pmap_steal_memory (sizeof (struct pm_ent *)
                                                   * vm_atop (physical_top));
  memset (pm_table, 0, sizeof (struct pm_ent *) * vm_atop (physical_top));

  trace_printf ("pm table at %p (%d kbytes)", pm_table,
                sizeof (struct pm_ent *) * vm_atop (physical_top) / 1024);

  /* ??? allocate virtual address space for special PTEs. */
}

/* Return virtual memory address space in VSTARTP and VENDP. */
void
pmap_virtual_memory (vm_offset_t *vstartp, vm_offset_t *vendp)
{
  *vstartp = virtual_start;
  *vendp = virtual_end;
}

/* This initializes the PMAP system. This is called right before the
   VM system is bootstrapped. */
void
pmap_init (void)
{
  /* Create a new memory zone for the pmap structures. */
  pmap_cache = kmem_cache_create ("pmap cache", sizeof (struct pmap), 0);
  pm_ent_cache = kmem_cache_create ("pm ent cache", sizeof (struct pm_ent),
                                    CACHE_NEVER_EMPTY_FLAG);
}

/* Create a new pmap. Return it. The pmap has one reference
   for being alive. */
struct pmap *
pmap_new (void)
{
}

/* Destroy PMAP - release all memory if the reference counter drops
   to zero. */
void
pmap_destroy (struct pmap *pmap)
{
   true we create new page directory entries if we encounter an empty
   directory entry. If FN returns anything other than zero the
   function returns that value directly. PA is the physical memory
   address. */
static int
iterate_pmap (struct pmap *pmap, void *start, void *end, void *pa, bool expand,
              int (*fn) (struct pmap *, void *, void *, void **, void *),
      if (! (((int) (*pde)) & PTE_VALID))
        {
          if (expand == false)
            {
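              /* Not expanding: skip the whole 4 MB (1 << 22 bytes)
                 of virtual address space this directory entry
                 covers.  */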
              start = start + (1 << 22);
              goto try_next_pde;
            }

          for (tries = 0, page = 0; tries < 3 && !page; tries++)
            page = (void *) pmap_grab_page ();
          if (!page)
            {
              return 1;
            }

          memset (page, 0, VM_PAGE_SIZE);
          *pde = (void *) ((unsigned int) page | _PROT_PD_ENTRY | PTE_VALID);
        }

          start = start + VM_PAGE_SIZE;
          pa = pa + VM_PAGE_SIZE;
        }

    try_next_pde:
      pde++;
    }
  return 0;
}

static int
enter_iterate_fn_wired (struct pmap *pmap, void *va, void *pa, void **pte,
                        void *arg)
{
  *pte = (void *) ((unsigned int) pa | (unsigned int) arg);
  return 0;
}

static int
enter_iterate_fn (struct pmap *pmap, void *va, void *pa, void **pte,
                  void *arg)
{
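  /* Unlike enter_iterate_fn_wired above, record the mapping in
     PM_TABLE so pmap_page_protect can later find it; wired mappings
     (such as the bootstrap identity mapping) skip this
     bookkeeping.  */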
  pm_insert_entry (pmap, (vm_offset_t) pa, (vm_offset_t) va);

  *pte = (void *) ((unsigned int) pa | (unsigned int) arg);
  return 0;
}

void
pmap_enter (struct pmap *pmap, vm_offset_t va, vm_offset_t pa, unsigned int prot,
            int wired_p)
{
  iterate_pmap (pmap, (void *) va, (void *) (va + VM_PAGE_SIZE),
                (void *) pa, true, wired_p ? enter_iterate_fn_wired : enter_iterate_fn,
                (void *) convert_real_prot (prot));
}

void
pmap_enter_range (struct pmap *pmap, vm_offset_t start, vm_offset_t end,
                  vm_offset_t pa, unsigned int prot, int wired_p)
{
  int err;
  err = iterate_pmap (pmap, (void *) start, (void *) end,
                      (void *) pa, true, wired_p ? enter_iterate_fn_wired : enter_iterate_fn,
                      (void *) convert_real_prot (prot));
  assert (err == 0);
}
                (void *) convert_real_prot (prot));
}
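
/* Debugging helper: dump every valid mapping in PMAP to the trace
   console.  Always returns 0.  */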
int
pmap_display (struct pmap *pmap)
{
  vm_offset_t start, end;
  void **ptd, **pte, **pde;

  trace_printf ("pmap display (%p)", pmap);

  start = 0;
  end = 0xf0000000;

  /* We loop through the pmap, starting at START, until we reach
     END or encounter an error. */
  pde = & pmap->pde [PDE_NUMBER (start)];
  while (start < end)
    {
      /* Skip empty page directory entries. */
      if (! (((int) (*pde)) & PTE_VALID))
        {
          pde++;
          start = start + (1 << PDE_SHIFT);
          continue;
        }

      trace_printf ("pde for address %x -> pde %08x", start, *pde);

      /* Loop through all page table entries and print each valid
         one. */
      ptd = (void **) ((unsigned int) (*pde) & ~VM_PAGE_MASK);
      pte = & ptd [PTE_NUMBER (start)];

      for (; pte < &ptd [PTES_PER_PAGE] && start < end; pte++)
        {
          if (((unsigned int) *pte) & 1)
            trace_printf (" va %08x -> pte %08x", start, (unsigned int) *pte);

          start = start + VM_PAGE_SIZE;
        }
      pde++;
    }
  return 0;
}
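
/* Return the physical address that VA maps to in PMAP.  */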
vm_offset_t
pmap_extract (struct pmap *pmap, vm_offset_t va)
{
}

/* Change protection for all virtual mappings of physical page PA to PROT. */
void
pmap_page_protect (vm_offset_t pa, vm_prot_t prot)
{
  struct pm_ent *pme;

  if (! pm_table)
    return;

  for (pme = pm_table [vm_atop (pa)]; pme; pme = pme->pme_next)
    {
#if 0
      trace_printf ("changing protection for pa %x (mapped at <%p, %x>) to %d",
                    pa, pme->pme_pmap, pme->pme_va, prot);
#endif
      pmap_protect (pme->pme_pmap, (void *) pme->pme_va, prot);
    }
  pmap_flush_tlb ();
}

/* Activate PMAP. */
void
pmap_activate (struct pmap *pmap)
{

/* Steal physical memory. The stolen memory is returned, or 0 if we
   fail to allocate SIZE bytes. */
vm_offset_t
pmap_steal_memory (unsigned int size)
{
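  /* This runs before the VM system is up: whole pages are carved
     directly out of the physical segment list and mapped at
     VIRTUAL_START with PMAP_ENTER below.  */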

  /* PA 0 will never be among those given to VM so we can use it
     to indicate we couldn't steal any memory. */
  for (ps = vm_physmem, bank = 0; bank < vm_physmem_count; bank++, ps++)
    {
      if (vm_atop (ps->avail_end - ps->avail_start) >= npgs)

      /* If we've used up all the pages in the segment, remove it and
         compact the list. */
      if (ps->start == ps->end)
        {
          /* If this was the last one, then a very bad thing has occurred. */

  va = (vm_offset_t) virtual_start;

  trace_printf ("pmap steal memory: stealing at pa %x (-%x)", pa, pa + size);
  while (size)
    {
      PMAP_ENTER (PMAP_KERNEL (), virtual_start, pa, VM_PROT_ALL,