#include "vm-slab.h"
#include "trace.h"
#include "libkern.h"
#include "ipc-port.h"
#include "thread.h"
#include "vm-pageout.h"

/* Object for all kernel memory. */
struct vm_object *object_kernel;

/* ... */

/* Allocate a new memory object.  SIZE is the size of the object;
   it should be page aligned. */
struct vm_object *
vm_object_allocate (unsigned int size)
{
  /* ... */
  *object = *object_template;
  object->size = size;
  queue_init (&object->pageq);
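  /* Each object carries its own lock; presumably this is what
     vm_object_lock and vm_object_unlock operate on. */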
  thread_lock_init (&object->lock, 1, 1);

  return object;
}

/* Get a reference to OBJECT. */
void
vm_object_reference (struct vm_object *object)
{
  vm_object_lock (object);
  object->refcnt++;
  vm_object_unlock (object);
}
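
/* Illustrative only: a reference gained here (or at allocation time)
   is paired with a later vm_object_deallocate call:

     vm_object_reference (object);
     ... use the object ...
     vm_object_deallocate (object);

   The caller must not already hold the object lock, since
   vm_object_reference takes it itself (assuming the lock is not
   recursive). */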

/* We want to map memory object STORE.  PAGER is the pager facility
   that communicates with STORE.  SIZE is the size of the object.

   If INTERNAL_P is true, we create a new object in the default
   pager. */
struct vm_object *
vm_object_enter (struct vm_pager *pager, struct ipc_port *store,
                 vm_size_t size, int internal_p)
{
  struct vm_object *object, *new_object = 0;
  int must_init_p = 0, kot;

  /* Look for an object associated with this port. */
  for (;;)
    {
      kot = ipc_port_kobject_type (store);

      if (kot)
        break;

      if (new_object == 0)
        new_object = vm_object_allocate (size);
      else
        {
          ipc_port_set_kobject (store, IPC_KOBJECT_TYPE_PAGER,
                                new_object);
          new_object = 0;
          must_init_p = 1;
        }
    }
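
  /* The loop above runs at most three times: one pass to allocate
     NEW_OBJECT, one to publish it as the port's kobject (setting
     MUST_INIT_P so that exactly one thread performs the pager setup
     below), and a final pass that sees the kobject and breaks.  A
     concurrent mapper may win the race at any point, in which case
     our NEW_OBJECT is discarded further down. */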

  if (internal_p)
    must_init_p = 1;

  /* It's only good if it's a VM object! */
  object = (kot == IPC_KOBJECT_TYPE_PAGER) ? store->kobject.port : 0;

  if (object && ! must_init_p)
    {
      vm_object_lock (object);
      object->refcnt++;
      vm_object_unlock (object);
    }

  /* If we raced to create a vm_object but lost,
     let's throw away ours. */
  if (new_object)
    vm_object_deallocate (new_object);

  if (! object)
    return 0;

  if (must_init_p)
    {
      /* Copy the naked send right we were given. */
      store = ipc_port_copy_send (store);
      if (! IPC_PORT_VALID (store))
        panic ("vm_object_enter: port died");

      object->pager_created_p = true;

      object->pager = pager;
      object->pager_cookie = store;

      /* Allocate request port. */
      object->pager_request = ipc_port_create_kernel ();
      if (! object->pager_request)
        panic ("vm_object_enter: pager request alloc");

      ipc_port_set_kobject (object->pager_request,
                            IPC_KOBJECT_TYPE_PAGING_REQUEST, object);
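
      /* Binding OBJECT as the request port's kobject presumably lets
         messages that the pager sends to this port be routed back to
         the right vm_object. */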

      /* Let the pager know we're using it. */
      if (internal_p)
        {
          /* ?? send message to default pager. */

          /* mark the object internal */
          object->internal_p = 1;
          assert (object->temporary_p);

          /* default-pager objects are ready immediately */
          object->pager_ready_p = 1;

          /* consumes the naked send right for DMM */
#if 0
          (void) memory_object_create (DMM,
                                       pager,
                                       object->size,
                                       object->pager_request,
                                       object->pager_name,
                                       PAGE_SIZE);
#endif
        }
      else
        {
          /* the object is external and not temporary */
          object->internal_p = 0;
          object->temporary_p = 0;

          /* User pager objects are not ready until marked so */
          object->pager_ready_p = 0;

          PAGER_INIT_OBJECT (object);
        }

      vm_object_lock (object);
      object->pager_initialized_p = 1;

      vm_object_wakeup (object, VM_OBJECT_EVENT_INITIALIZED);
    }
  else
    vm_object_lock (object);

  /* [At this point, the object must be locked.]

     Wait for the work above to be done by the first thread to map
     this object.  vm_object_wait drops the object lock while
     sleeping, so it must be re-taken on every iteration. */
  while (! object->pager_initialized_p)
    {
      vm_object_wait (object, VM_OBJECT_EVENT_INITIALIZED);
      vm_object_lock (object);
    }
  vm_object_unlock (object);

  return object;
}
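
/* Illustrative only: a mapping path might call this as

     object = vm_object_enter (pager, store, size, 0);
     if (! object)
       return 0;

   where STORE is the memory object port received from the client.
   The returned object presumably carries a reference that the caller
   eventually drops with vm_object_deallocate. */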

/* Release a reference to the specified object, gained either
   through a vm_object_allocate or a vm_object_reference call.

   When all references are gone, storage associated with this
   object may be relinquished.  No object may be locked. */
void
vm_object_deallocate (struct vm_object *object)
{
  struct vm_object *temp;

  while (object != 0)
    {
      /* Lose the reference. */
      vm_object_lock (object);
      if (--(object->refcnt) > 0)
        {
          /* If there are still references, then we are done. */
          vm_object_unlock (object);
          return;
        }

      if (object->pager_created_p && !object->pager_initialized_p)
        {
          /* Have to wait for initialization.  Put the reference back
             and retry once it is initialized. */
          object->refcnt++;
          vm_object_assert_wait (object,
                                 VM_OBJECT_EVENT_INITIALIZED);
          vm_object_unlock (object);
          thread_block (0);
          continue;
        }

      /* Take the reference to the shadow object
         out of the object to be destroyed. */
      temp = object->shadow_object;

      /* Destroy the object; the cache lock will
         be released in the process. */
      vm_object_terminate (object);

      /* Deallocate the reference to the shadow
         by continuing the loop with that object
         in place of the original. */
      object = temp;
    }
}
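
/* Note that the shadow chain is unwound iteratively rather than by
   recursing into vm_object_deallocate, presumably to keep kernel
   stack usage bounded on long shadow chains. */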

/* Free all resources associated with OBJECT.  Upon entry, the object
   must be locked and have no references.  The shadow object reference
   is left alone. */
void
vm_object_terminate (struct vm_object *object)
{
  struct vm_object *shadow_object;
  struct vm_page *m;

#if 0
  /* Make sure the object isn't already being terminated. */
  assert (object->alive);
  object->alive = FALSE;

  /* Make sure no one can look us up now. */
  vm_object_remove (object);
  vm_object_cache_unlock ();
#endif

  /* Detach the object from its shadow if we are the shadow's copy. */
  if ((shadow_object = object->shadow_object) != 0)
    {
      vm_object_lock (shadow_object);
      assert ((shadow_object->copy == object)
              || (shadow_object->copy == 0));
      shadow_object->copy = 0;
      vm_object_unlock (shadow_object);
    }

  /* The pageout daemon might be playing with our pages.
     Now that the object is dead, it won't touch any more
     pages, but some pages might already be on their way out.
     Hence, we wait until all active paging activity has ceased. */
  vm_object_paging_wait (object);

  /* Clean or free the pages, as appropriate.  It is possible for
     us to find busy/absent pages, if some faults on this object
     were aborted. */
  if (object->temporary_p || object->pager == 0)
    {
      while (! queue_empty (&object->pageq))
        {
          m = (struct vm_page *) queue_first (&object->pageq);

          if (m->busy_p && !m->absent_p)
            panic ("vm_object_terminate.2 0x%x 0x%x",
                   object, m);

          vm_page_free (m);
        }
    }
  else
    while (! queue_empty (&object->pageq))
      {
        m = (struct vm_page *) queue_first (&object->pageq);

        if (m->busy_p && !m->absent_p)
          panic ("vm_object_terminate.3 0x%x 0x%x", object, m);
        VM_PAGE_QUEUES_REMOVE (m);

        if (m->absent_p)
          {
            /* For private pages, VM_PAGE_FREE just leaves the page
               structure around for its owner to clean up.  For absent
               pages, the structure is returned to the appropriate pool. */
            goto free_page;
          }

        if (m->fictitious_p)
          panic ("vm_object_terminate.4 0x%x 0x%x", object, m);

#if 0
        if (!m->dirty_p)
          m->dirty_p = pmap_is_modified (m->phys_addr);
#endif
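
        /* Dirty or precious pages still matter to the pager: they are
           pushed back out via vm_pageout_page rather than dropped.
           Clean, non-precious pages can be freed directly. */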
        if (m->dirty_p || m->precious_p)
          {
            m->busy_p = 1;
            vm_pageout_page (m, 0, 1);
          }
        else
          {
          free_page:
            vm_page_free (m);
          }
      }

  assert (object->refcnt == 0);
  assert (object->paging_in_progress == 0);

  /* Throw away port rights... note that they may already have
     been thrown away (by vm_object_deallocate or memory_object_destroy).

     Instead of destroying the control and name ports,
     we send all rights off to the memory manager,
     using memory_object_terminate. */
  vm_object_unlock (object);

  if (object->pager != 0)
    {
#if 0
      /* consumes our rights for pager, pager_request, pager_name */
      memory_object_release (object->pager,
                             object->pager_request,
                             object->pager_name);
#endif
    }

  kmem_cache_free (object_cache, object);
}

/* Shadow object OBJECT at offset OFFSET. */
struct vm_object *
vm_object_shadow (struct vm_object *object, vm_offset_t offset)
{
  /* ... */
}

/* Prepare for delayed copy (copy-on-write share) of OBJECT. */
void
vm_object_prepare_delayed_copy (struct vm_object *object)
{
  struct vm_page *page;

  queue_iterate (&object->pageq, page, struct vm_page *, pageq)
    PMAP_PAGE_PROTECT (page->phys_addr, VM_PROT_READ);
}
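
/* After the pages are write-protected, the first write fault on a
   sharer is presumably resolved through a shadow object (see
   vm_object_shadow), copying only the faulted page; that deferred
   copying is the "delayed" part of the name. */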

/* Bootstrap VM objects. */
void
vm_object_bootstrap (void)
{
  /* ... */
}

/* Initialize VM objects. */
void
vm_object_init (void)
{