/* VM address spaces.
   Copyright 1999, 2000, 2001, 2002 Johan Rydberg, jrydberg@rtmk.org.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
/* Map for kernel memory. */
struct vm_map *map_kernel;

#ifndef N_STATIC_MAP_ENTRIES
# define N_STATIC_MAP_ENTRIES 1024
#endif

/* We have a statically allocated array of special map entries.
   These are used for interrupt-safe maps (i.e. the kernel map),
   where we cannot go through the slab allocator, since growing
   the entry cache may itself allocate map entries.  We also hold
   a (spinlock-protected) list of free entries. */
static struct vm_map_entry kernel_map_entries [N_STATIC_MAP_ENTRIES];

static struct vm_map_entry *static_free_entry_list;
static spin_lock_t static_free_entry_lock = SPIN_LOCK_INITIALIZER;

/* Allocate a map entry structure for MAP.  Return it. */
static struct vm_map_entry *
vm_map_entry_allocate (struct vm_map *map)
{
  struct vm_map_entry *entry;

  if (map->flags & VM_MAP_INTRSAFE || map == map_kernel || !map_kernel)
    {
      SPL_T spl;

      spl = SPLOFF ();
      spin_lock (& static_free_entry_lock);
      entry = static_free_entry_list;
      if (entry)
        static_free_entry_list = (struct vm_map_entry *) entry->link.next;
      spin_unlock (& static_free_entry_lock);
      SPLON (spl);

      if (! entry)
        panic ("no static entries left! adjust N_STATIC_MAP_ENTRIES");
      memset (entry, 0, sizeof (struct vm_map_entry));
      entry->static_p = 1;
    }
  else
    entry = (struct vm_map_entry *) kmem_cache_alloc (map_entry_cache);

  return entry;
}

/* Free map entry ENTRY allocated with vm_map_entry_allocate. */
static void
vm_map_entry_free (struct vm_map *map, struct vm_map_entry *entry)
{
  if (entry->static_p)
    {
      SPL_T spl;

      spl = SPLOFF ();
      spin_lock (& static_free_entry_lock);
      entry->link.next = (struct queue_entry *) static_free_entry_list;
      static_free_entry_list = entry;
      spin_unlock (& static_free_entry_lock);
      SPLON (spl);
    }
  else
    kmem_cache_free (map_entry_cache, entry);
}
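
/* Illustrative sketch of how the allocate/free pair above is meant
   to be used.  Not part of this file: EXAMPLE_INSERT_ENTRY and the
   use of KERN_RESOURCE_SHORTAGE here are assumptions.  */
#if 0
static kern_return_t
example_insert_entry (struct vm_map *map)
{
  struct vm_map_entry *entry;

  /* Interrupt-safe maps (and the kernel map) are served from the
     static pool, which panics on exhaustion; all other maps go
     through the kmem cache, which may return NULL.  */
  entry = vm_map_entry_allocate (map);
  if (! entry)
    return KERN_RESOURCE_SHORTAGE;

  /* ... initialize ENTRY and link it into MAP here ...  */

  /* On a failure path, hand the entry back; ENTRY->static_p lets
     vm_map_entry_free return it to the right pool.  */
  vm_map_entry_free (map, entry);
  return KERN_SUCCESS;
}
#endif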

/* Convenience function for scanning MAP for an entry that is
}

/* Initialize the VM map part of the VM system. */
void
vm_map_init (void)
{
  int i;

  /* Initialize the list of statically allocated kernel map entries. */
  static_free_entry_list = 0;
  for (i = 0; i < N_STATIC_MAP_ENTRIES; i++)
    {
      kernel_map_entries [i].link.next =
        (struct queue_entry *) static_free_entry_list;
      static_free_entry_list = & kernel_map_entries [i];
    }

  /* Create cache for map structures. */
  map_cache = kmem_cache_create ("vm map cache", sizeof (struct vm_map), 0);

  /* Create memory map for kernel address space. */
  map_kernel = vm_map_create (PMAP_KERNEL (), VM_KERN_MIN_ADDRESS,
                              VM_KERN_MAX_ADDRESS);
  assert (map_kernel);
  map_kernel->flags = VM_MAP_INTRSAFE;
}

/* Create a new VM map object.  PMAP is the physical map that will
   belong to this address space.  MIN and MAX specify the limits of
   the address space.  Returns NULL if we fail to allocate memory. */
struct vm_map *
vm_map_create (struct pmap *pmap, vm_address_t min, vm_address_t max)
{
  return map;
}
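
/* A minimal usage sketch for vm_map_create; pmap_create and the
   VM_USER_* limits are assumptions, not definitions from this file.  */
#if 0
static void
example_create_user_map (void)
{
  struct vm_map *map;

  /* Create a map covering a (hypothetical) user address range.  */
  map = vm_map_create (pmap_create (), VM_USER_MIN_ADDRESS,
                       VM_USER_MAX_ADDRESS);
  assert (map);

  /* ... enter mappings into MAP ...  */

  /* Drop our reference; vm_map_release below tears the map down
     once the reference counter reaches zero.  */
  vm_map_release (map);
}
#endif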

/* Release a reference to MAP.  If the reference counter drops to
   zero, release all resources held by the map. */
void
vm_map_release (struct vm_map *map)
{
  struct vm_map_entry *entry;
  int free_p;

  thread_lock_write (&map->lock);
  free_p = (--map->refcnt) == 0;
  thread_lock_unlock (&map->lock);

  if (free_p)
    {
      while (! queue_empty (& map->entries))
        {
          entry = (struct vm_map_entry *) dequeue_head (& map->entries);

          if (entry->submap_p)
            vm_map_release (entry->submap);
          else
            vm_object_destroy (entry->object);
          vm_map_entry_free (map, entry);
        }
    }
}

/* Fork SRC_MAP.  Return a clone of it.  We loop through all the map
   entries and clone them according to their inherit flags. */

/* ??? rewrite this function - make it look a bit nicer.  break it
   out into several functions? */
struct vm_map *
vm_map_fork (struct vm_map *src_map)
{
      struct vm_map *submap;
      kern_return_t kr;

      new_entry = vm_map_entry_allocate (dst_map);
      assert (new_entry);

      /* ??? what about the pmap? */

    case VM_INHERIT_COPY:
      {
        new_entry = vm_map_entry_allocate (dst_map);
        assert (new_entry);

        new_entry->copy_on_write_p = true;
        new_entry->max_protection = src_entry->max_protection;
        new_entry->object = src_entry->object;
        new_entry->inherit = src_entry->inherit;
        new_entry->submap_p = false;

        if (src_entry->object)
          {

  /* Now we have found a place to put the new entry.
     Just allocate an entry and insert it into the queue. */
  new_entry = vm_map_entry_allocate (map);
  if (! new_entry)
    {
      vm_map_unlock (map);
  insque (&new_entry->link, &search_entry->link);

  vm_map_unlock (map);
  return KERN_SUCCESS;
}

  /* We loop through all the entries in the map to see if we can find
     an empty spot that we (start_address-end_address) fit into. */
  while (search_entry)
    {
      start_address = vm_round_page (start_address);

      /* Now we have found a place to put the new entry.
         Just allocate an entry and insert it into the queue. */
      new_entry = vm_map_entry_allocate (map);
      if (! new_entry)
        {
          vm_map_unlock (map);
      insque (&new_entry->link, &search_entry->link);

      vm_map_unlock (map);
      return KERN_SUCCESS;
    }