#include "vm-object.h"
#include "vm-page.h"
#include "trace.h"
#include "thread.h"

/* Clean up the result of vm_fault_page.
   The paging reference for OBJECT is released.  OBJECT is unlocked.
   If TOP_PAGE is not NULL, TOP_PAGE is freed and the paging reference
   for the object containing it is released.  */
void
vm_fault_cleanup (struct vm_object *object, struct vm_page *top_page)
{
  vm_object_paging_end (object);
  vm_object_unlock (object);

  if (top_page != 0)
    {
      object = top_page->object;
      vm_object_lock (object);
      vm_page_free (top_page);
      vm_object_paging_end (object);
      vm_object_unlock (object);
    }
}

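/* ??? Note on usage: a caller is expected to take a paging reference on
   the top-level object with vm_object_paging_begin before calling
   vm_fault_page, and to drop it again through vm_fault_cleanup once the
   page has been used.  Roughly (a sketch of the sequence used by
   vm_fault below):

     vm_object_paging_begin (object);
     kr = vm_fault_page (object, offset, fault_type,
                         &page, &first_page, &prot);
     ... use *page ...
     vm_fault_cleanup (page->object, first_page);  */
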
/* Fault on offset FIRST_OFFSET in FIRST_OBJECT.  FAULT_TYPE is the
   access being attempted (VM_PROT_READ or VM_PROT_WRITE).  If we
   succeed, the page is returned busy in *RESULT_PAGE; the busy page
   kept in the top-level object, if any, is returned in *TOP_PAGE, and
   *PROTECTION may be reduced (write permission is removed when the page
   was found in a backing object and not copied).  */
kern_return_t
vm_fault_page (struct vm_object *first_object, vm_offset_t first_offset,
               vm_prot_t fault_type, struct vm_page **result_page,
               struct vm_page **top_page, vm_prot_t *protection)
{
  struct vm_object *object, *next_object;
  struct vm_page *m, *first_m = 0;
  vm_offset_t offset;
  int look_for_page_p;

  object = first_object;
  offset = first_offset;

  vm_object_lock (object);
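
  /* Walk the shadow chain starting at FIRST_OBJECT.  On entry to each
     iteration OBJECT is locked and carries a paging reference: the
     caller supplies the reference for FIRST_OBJECT, and we take one
     ourselves for every shadow object we step into below.  */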
  while (1)
    {
      /* Look for page at OFFSET in OBJECT. */
      m = vm_page_lookup (object, offset);

      if (m != 0)
        {
          /* If page is in transit (i.e. busy) we must wait for it
             to become available.  We simply block here and retry. */
          if (m->busy_p)
            {
              vm_object_unlock (object);
              VM_PAGE_WAIT (m);
              vm_object_lock (object);
              continue;
            }

          /* If the page is in error, give up now. */
          if (m->error_p)
            {
              vm_page_free (m);
              vm_fault_cleanup (object, first_m);
              return VM_FAULT_MEMORY_ERROR;
            }

          /* The page is marked absent: the memory manager has not
             supplied any data for it.  Drop down to the object we
             shadow; at the bottom of the chain we allocate a real page
             and zero fill it. */
          if (m->absent_p)
            {
              /* Remove the non-existent page (unless it's
                 in the top object) and move on down to the
                 next object (if there is one). */

              offset += object->shadow_offset;
              /* access_required = VM_PROT_READ; */
              next_object = object->shadow_object;
              if (next_object == 0)
                {
                  struct vm_page *real_m;

                  /* Absent page at bottom of shadow chain;
                     zero fill the page we left busy in the first object,
                     and flush the absent page.  But first we need to
                     allocate a real page. */

                  real_m = vm_page_allocate ();
                  if (! real_m)
                    {
                      vm_fault_cleanup (object, first_m);
                      return VM_FAULT_MEMORY_SHORTAGE;
                    }

                  if (object != first_object)
                    {
                      vm_page_free (m);
                      vm_object_paging_end (object);
                      vm_object_unlock (object);
                      object = first_object;
                      offset = first_offset;
                      m = first_m;
                      first_m = 0;
                      vm_object_lock (object);
                    }

                  vm_page_free (m);
                  assert (real_m->busy_p);
                  vm_page_insert (real_m, object, offset);
                  m = real_m;

                  /* Drop the lock while zero filling page.
                     Then break because this is the page we wanted.

                     Checking the page lock is a waste of time;
                     this page was either absent or newly allocated
                     -- in both cases it can't be page locked by a pager. */
                  vm_object_unlock (object);
                  vm_page_zero_fill (m);
                  vm_object_lock (object);
                  break;
                }
              else
                {
                  if (object != first_object)
                    {
                      vm_object_paging_end (object);
                      vm_page_free (m);
                    }
                  else
                    {
                      first_m = m;
                      m->absent_p = 0;
                      m->busy_p = 1;

                      VM_PAGE_QUEUES_REMOVE (m);
                    }
                  vm_object_lock (next_object);
                  vm_object_unlock (object);
                  object = next_object;
                  vm_object_paging_begin (object);
                  continue;
                }
            }

          assert (! m->busy_p);
          m->busy_p = 1;
          assert (! m->absent_p);
          break;
        }

      look_for_page_p = object->pager_created_p == 1;

      /* If we should look for the page in the pager, or OBJECT is the
         first object, we must allocate a fictitious page. */
      if (look_for_page_p || object == first_object)
        {
          /* Allocate a new page for this object/offset pair. */
          m = vm_page_grab_fictitious ();
          if (! m)
            {
              vm_fault_cleanup (object, first_m);
              return VM_FAULT_FICTITIOUS_SHORTAGE;
            }
          vm_page_insert (m, object, offset);
        }

      if (look_for_page_p)
        {
          if (! object->pager_ready_p)
            {
              vm_page_free (m);
              vm_object_unlock (object);
              vm_object_wait (object, VM_OBJECT_EVENT_PAGER_READY);
              vm_object_lock (object);
              continue;
            }

          /* Indicate that the page is waiting for data
             from the memory manager. */
          m->absent_p = 1;

          vm_object_unlock (object);

          /* Call the memory manager to retrieve the data. */
          PAGER_PAGE_REQUEST (object, m->offset + object->pager_offset,
                              VM_PROT_ALL, m);

          /* Retry with same object/offset, since new data may
             be in a different page (i.e., m is meaningless at
             this point). */
          vm_object_lock (object);
          continue;
        }
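
      /* ??? After asking the memory manager for the data we simply go
         around the loop again; the busy and error checks at the top of
         the loop are what actually wait for the manager to supply the
         page or to flag an error on it. */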

      /* If this is the first object, record page as first page. */
      if (object == first_object)
        first_m = m;

      /* Check if we should step into the next object. */
      offset += object->shadow_offset;
      next_object = object->shadow_object;
      if (next_object == 0)
        {
          /* If there's no object left, fill the page in the top
             object with zeros.  But first we need to allocate
             a real page. */
          if (object != first_object)
            {
              vm_object_paging_end (object);
              vm_object_unlock (object);

              object = first_object;
              offset = first_offset;
              vm_object_lock (object);
            }

          m = first_m;
          first_m = 0;

          if (m->fictitious_p && !vm_page_convert (m))
            {
              vm_page_free (m);
              panic ("can not convert page");
            }

          vm_page_zero_fill (m);
          break;
        }
      else
        {
          vm_object_lock (next_object);
          if (object != first_object)
            vm_object_paging_end (object);
          vm_object_unlock (object);
          object = next_object;
          vm_object_paging_begin (object);
        }
    }

  /* PAGE HAS BEEN FOUND.

     This page (m) is:
       - busy, so that we can play with it;
       - not absent, so that nobody else will fill it;
       - possibly eligible for pageout;

     The top-level page (first_m) is:
       - NULL if the page was found in the top-level object;
       - busy, not absent, and ineligible for pageout.

     The current object (object) is locked.

     ??? a paging reference is held for the current
     and top-level objects. */

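  /* If the page lives in an object we merely shadow (a backing object),
     a write fault must copy it into FIRST_OBJECT below.  For a read
     fault we instead hand back the original page write-protected; the
     copy is then made lazily by a later write fault. */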
  if (object != first_object)
    {
      /* We only really need to copy if we want to write it. */
      if (fault_type & VM_PROT_WRITE)
        {
          struct vm_page *copy_m;

          /* Allocate a page for the copy. */
          copy_m = vm_page_allocate ();
          if (copy_m == 0)
            {
              VM_PAGE_WAKEUP_DONE (m);
              if (!m->active_p && !m->inactive_p)
                vm_page_activate (m);
              vm_fault_cleanup (object, first_m);
              return VM_FAULT_MEMORY_SHORTAGE;
            }

          vm_object_unlock (object);
          vm_page_copy (copy_m, m);
          vm_object_lock (object);

          /* If another map is truly sharing this
             page with us, we have to flush all
             uses of the original page, since we
             can't distinguish those which want the
             original from those which need the
             new copy.

             ??? If we know that only one map has
             access to this page, then we could
             avoid the pmap_page_protect() call. */

          vm_page_lock_queues ();
          vm_page_deactivate (m);
          pmap_page_protect (m->phys_addr, VM_PROT_NONE);
          vm_page_unlock_queues ();

          /* We no longer need the old page or object. */
          VM_PAGE_WAKEUP_DONE (m);

          vm_object_paging_end (object);
          vm_object_unlock (object);

          object = first_object;
          offset = first_offset;

          vm_object_lock (object);
          vm_page_free (first_m);
          first_m = 0;
          assert (copy_m->busy_p);
          vm_page_insert (copy_m, object, offset);
          m = copy_m;

#if 0
          /* Now that we've gotten the copy out of the
             way, let's try to collapse the top object.
             But we have to play ugly games with
             paging_in_progress to do that. */
          vm_object_paging_end (object);
          vm_object_collapse (object);
          vm_object_paging_begin (object);
#endif
        }
      /* If we just try to read the page, no need to copy it.
         Just lower the mapping protection to write-protected. */
      else
        *protection &= ~VM_PROT_WRITE;
    }

  if (fault_type & VM_PROT_WRITE)
    m->dirty_p = 1;

  *result_page = m;
  *top_page = first_m;

  return VM_FAULT_SUCCESS;
}

/* Fault at OFFSET in MAP.  FAULT_TYPE is either VM_PROT_READ or
   VM_PROT_WRITE.  KERNEL_P is true if the fault was from kernel mode.
   Returns KERN_SUCCESS if the fault could be resolved. */
kern_return_t
vm_fault (struct vm_map *map, vm_offset_t offset, vm_prot_t fault_type,
          bool kernel_p)
{
  struct vm_object *object;
  struct vm_page *page, *first_page;
  vm_offset_t map_offset, address;
  struct vm_map_entry *entry;
  struct vm_map *real_map;
  kern_return_t kr;
  vm_prot_t prot;
  bool wired_p, done_p = 0;

  /* If we have a fault from user mode we subtract the user mode offset. */
  map_offset = kernel_p ? offset : offset - VM_USER_MIN_ADDRESS;

  while (! done_p)
    {
      real_map = map;
      kr = vm_map_lookup (&real_map, map_offset, fault_type, &object,
                          &address, &prot, &wired_p, &entry);
      if (kr != KERN_SUCCESS)
        return kr;

      /* Check that we do not exceed the permitted protection. */
      if ((fault_type & prot) != fault_type)
        return KERN_PROTECTION_FAILURE;

      /* Make a reference to this object to prevent its disposal while
         we are messing with it.  Once we have the reference, the map
         is free to be diddled.  Since objects reference their shadows
         (and copies), they will stay around as well. */
      assert (object->refcnt > 0);
      object->refcnt++;
      vm_object_paging_begin (object);

      kr = vm_fault_page (object, map_offset - address, fault_type,
                          &page, &first_page, &prot);

      /* If we didn't succeed, lose the object reference immediately. */
      if (kr != KERN_SUCCESS)
        vm_object_deallocate (object);

      switch (kr)
        {
        case VM_FAULT_SUCCESS:
          done_p = 1;
          break;

        case VM_FAULT_FICTITIOUS_SHORTAGE:
          vm_page_more_fictitious ();
          continue;

        case VM_FAULT_MEMORY_ERROR:
        case VM_FAULT_MEMORY_SHORTAGE:
          return KERN_FAILURE;

        case VM_FAULT_PROTECTION_FAILURE:
          return KERN_PROTECTION_FAILURE;

        case VM_FAULT_RETRY:
          continue;
        }
    }

  /* Enter the page into the physical map and return success. */
  PMAP_ENTER (map->pmap, offset, page->phys_addr, prot, wired_p);

  /* If the page is not wired down and isn't already
     on a pageout queue, then put it where the pageout
     daemon can find it. */
  if (!page->active_p && !page->inactive_p)
    vm_page_activate (page);

  VM_PAGE_WAKEUP_DONE (page);

  vm_fault_cleanup (page->object, first_page);
  vm_object_deallocate (object);

  return KERN_SUCCESS;
}
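
/* ??? Example (hypothetical): a hardware page-fault trap handler would
   end up calling something like

     kr = vm_fault (faulting_map, fault_address,
                    write_access_p ? VM_PROT_WRITE : VM_PROT_READ,
                    from_kernel_mode_p);

   where faulting_map, fault_address, write_access_p and
   from_kernel_mode_p are placeholders supplied by the trap handler,
   which is not part of this file. */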