#include "vm-map.h"
#include "trace.h"
#include "libkern.h"
#include "vm-page.h"

struct vm_object;

/* ... */
      entry->static_p = 1;
    }
  else
    {
      entry = (struct vm_map_entry *) kmem_cache_alloc (map_entry_cache);
      if (entry)
        memset (entry, 0, sizeof (struct vm_map_entry));
    }

  return entry;
}
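
/* Usage sketch (illustrative, not from the original source):
   non-static entries pair with vm_map_entry_free below:

       struct vm_map_entry *e = vm_map_entry_allocate (map);
       ...
       vm_map_entry_free (map, e);

   Entries with static_p set live in storage owned by the map itself
   and presumably bypass map_entry_cache on both paths.  */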

/* ... */
  kmem_cache_free (map_entry_cache, entry);
}

/* Copies the full contents of SRC_ENTRY to NEW_ENTRY,
   preserving the static flag of NEW_ENTRY.  */
#define vm_map_entry_copy(NEW_ENTRY, SRC_ENTRY)        \
  do                                                   \
    {                                                  \
      int static_p = (NEW_ENTRY)->static_p;            \
      *(NEW_ENTRY) = *(SRC_ENTRY);                     \
      (NEW_ENTRY)->static_p = static_p;                \
    }                                                  \
  while (0)
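
/* For illustration: both clip routines below rely on this copy
   semantics -- a fresh cache entry keeps its own static_p while
   inheriting everything else:

       new_entry = vm_map_entry_allocate (map);
       vm_map_entry_copy (new_entry, entry);
*/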

/* Asserts that ENTRY begins at or after the specified address;
   if necessary, it splits the entry into two.  */
#define vm_map_clip_start(MAP, ENTRY, STARTADDR)        \
  do                                                    \
    {                                                   \
      if ((STARTADDR) > (ENTRY)->start)                 \
        _vm_map_clip_start (MAP, ENTRY, STARTADDR);     \
    }                                                   \
  while (0)

/* This routine is called only when it is known that
   the entry must be split.  */
static void
_vm_map_clip_start (struct vm_map *map, struct vm_map_entry *entry,
                    vm_offset_t start)
{
  struct vm_map_entry *new_entry, *insert_entry;

  /* Split off the front portion -- note that we must insert the new
     entry BEFORE this one, so that this entry has the specified starting
     address.  */
  new_entry = vm_map_entry_allocate (map);
  vm_map_entry_copy (new_entry, entry);

  new_entry->end = start;
  entry->offset += (start - entry->start);
  entry->start = start;

  insert_entry = (struct vm_map_entry *) queue_prev (&entry->link);
  insque (&new_entry->link, &insert_entry->link);

  if (entry->submap_p)
    vm_map_reference (new_entry->submap);
  else
    vm_object_reference (new_entry->object);
}
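
/* Sketch of the split above (addresses grow to the right):

       before:              [ entry:  old start ......... end )
       after:   [ new_entry: old start .. START )[ entry: START .. end )

   new_entry keeps the original offset; entry->offset is advanced by
   (START - old start) so both halves still map the same portions of
   the backing object or submap.  */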

/* Asserts that ENTRY ends at or before the specified address;
   if necessary, it splits the entry into two.  */
#define vm_map_clip_end(MAP, ENTRY, ENDADDR)    \
  do                                            \
    {                                           \
      if ((ENDADDR) < (ENTRY)->end)             \
        _vm_map_clip_end (MAP, ENTRY, ENDADDR); \
    }                                           \
  while (0)

/* This routine is called only when it is known that
   the entry must be split.  */
static void
_vm_map_clip_end (struct vm_map *map, struct vm_map_entry *entry,
                  vm_offset_t end)
{
  struct vm_map_entry *new_entry;

  /* Create a new entry and insert it AFTER the specified entry.  */
  new_entry = vm_map_entry_allocate (map);
  vm_map_entry_copy (new_entry, entry);

  new_entry->start = entry->end = end;
  new_entry->offset += (end - entry->start);

  /* Insert high split entry right after the current entry.  */
  insque (&new_entry->link, &entry->link);

  if (entry->submap_p)
    vm_map_reference (new_entry->submap);
  else
    vm_object_reference (new_entry->object);
}
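
/* Sketch, mirroring _vm_map_clip_start:

       before:  [ entry: start ................. old end )
       after:   [ entry: start .. END )[ new_entry: END .. old end )

   with new_entry->offset advanced by (END - start).  */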

/* Convenience function for scanning MAP for an entry located at
   OFFSET.  If found, it is returned; otherwise NULL.  If no entry
   contains OFFSET and PREVP is non-null, *PREVP is set to the first
   entry lying above OFFSET (or left NULL if there is none).  */
static inline struct vm_map_entry *
lookup_entry_in_map (struct vm_map *map, vm_address_t offset,
                     struct vm_map_entry **prevp)
{
  struct vm_map_entry *entry;

  if (prevp)
    *prevp = 0;

  entry = vm_map_entry_first (map);
  while (entry)
    {
      if (entry->end > offset)
        {
          if (offset >= entry->start)
            return entry;
          if (prevp)
            *prevp = entry;
          return 0;
        }
#if 0
      if ((offset >= entry->start) && (offset < entry->end))
        return entry;
#endif
      entry = vm_map_entry_next (map, entry);
    }
  return 0;
}

/* ... */
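
/* Illustrative: callers that may start in a hole use PREVP to resume
   the scan, as vm_map_delete and vm_map_protect below do:

       if (! (entry = lookup_entry_in_map (map, offset, &first_entry)))
         entry = vm_map_entry_next (map, first_entry);
*/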

/* Obtain a reference to MAP.  */
void
vm_map_reference (struct vm_map *map)
{
  map->refcnt++;
}

/* Fork SRC_MAP and return a clone of it.  We loop through all the
   map entries and clone them according to their inherit flag.  */

/* ... */
      /* Insert object into new submap.  */
      kr = vm_map_enter (submap, src_entry->start,
                         vm_map_entry_size (src_entry),
                         src_entry->object,
                         src_entry->max_protection,
                         src_entry->protection,
                         src_entry->inherit);
      assert (kr == KERN_SUCCESS);

      /* Add a reference to the new submap (we already have one for
         being alive).  No need to lock the submap, since it has just
         been created.  */
      submap->refcnt++;

      /* ??? reference to object.  */

/* ... */

/* Insert OBJECT at OFFSET in MAP.  SIZE is the size of the object.
   MAX_PROTECTION is the maximum protection allowed for the region,
   and PROTECTION its initial protection.  */
kern_return_t
vm_map_enter (struct vm_map *map, vm_address_t offset, vm_size_t size,
              struct vm_object *object, vm_prot_t max_protection,
              /* ... */

  vm_map_lock (map);

  /* Check that the range in the map is not already allocated.  */
  if (lookup_entry_in_map (map, offset, 0)
      || lookup_entry_in_map (map, vm_trunc_page ((offset + size - 1)), 0))
    {
      vm_map_unlock (map);
      return KERN_NO_SPACE;

/* ... */

/* ... false we try to insert OBJECT at *OFFSETP, otherwise the new
   offset for OBJECT is returned in *OFFSETP.  SIZE is the size of
   the object.  MAX_PROTECTION is the maximum protection allowed for
   the region, and PROTECTION its initial protection.  */
kern_return_t
vm_map_allocate (struct vm_map *map, vm_address_t *offsetp, vm_size_t size,
                 struct vm_object *object, vm_prot_t max_protection,
                 /* ... */

  return KERN_SUCCESS;
}

/* Change protection for region [ADDRESS, ADDRESS+SIZE) in MAP
   to PROTECTION.  OFFSET is the offset into OBJECT.  */
void
vm_map_pmap_protect (struct vm_map *map, vm_offset_t address,
                     vm_offset_t offset, vm_size_t size,
                     struct vm_object *object, vm_prot_t protection)
{
  struct vm_page *page;

  /* If we are not changing protection for the kernel map, we are
     changing protection for a user map (very logical).  Add the user
     offset.  */
  if (map->pmap != PMAP_KERNEL ())
    address = address + VM_USER_MIN_ADDRESS;

  /* Iterate through all pages (that we may access) and change their
     protection.  */
  queue_iterate (&object->pageq, page, struct vm_page *, pageq)
    {
      if (offset > page->offset)
        continue;

      if (page->offset > (offset + size))
        break;

      PMAP_PROTECT (map->pmap, address - offset + page->offset, protection);
    }
}
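
/* Illustrative: vm_map_delete below passes protection 0 here to strip
   all access from a dying range before destroying its object:

       vm_map_pmap_protect (map, offset, entry->offset,
                            entry->end - entry->start, entry->object, 0);
*/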


/* Search for an entry located at OFFSET in MAP.  If found,
   information about it is returned in OBJECTP, ADDRESSP, PROTP and
   WIREDP.  ENTRYP is set to the entry itself.  */
kern_return_t
vm_map_lookup (struct vm_map **map, vm_address_t offset,
               vm_prot_t fault_prot, struct vm_object **objectp,
               /* ... */

  do
    {
      entry = lookup_entry_in_map (lookup_map, offset, 0);
      if (entry && entry->submap_p)
        lookup_map = entry->submap;
    }
  /* ... */

      entry->copy_on_write_p = false;
    }

  /* Allocate an object if the entry has none.  */
  if (! entry->object)
    entry->object = vm_object_allocate (entry->end - entry->start);

  *map = lookup_map;
  *objectp = entry->object;
  *addressp = (entry->start - entry->offset);
  *protp = entry->protection;
  *wiredp = false;
  *entryp = entry;

  return KERN_SUCCESS;
}
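
/* Illustrative (hypothetical caller): a page-fault handler would
   resolve a faulting address roughly like

       kr = vm_map_lookup (&map, fault_addr, VM_PROT_READ, &object,
                           &address, &prot, &wired, &entry);

   assuming the elided parameters are the five result pointers filled
   in above; on success it pages data in from OBJECT.  */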

/* Delete the range [OFFSET, OFFSET+SIZE) from MAP.  */
kern_return_t
vm_map_delete (struct vm_map *map, vm_offset_t offset, vm_size_t size)
{
  struct vm_map_entry *entry, *first_entry;

  if (! (entry = lookup_entry_in_map (map, offset, &first_entry)))
    entry = vm_map_entry_next (map, first_entry);
  else
    vm_map_clip_start (map, entry, offset);

  while (entry && entry->start < (offset + size))
    {
      struct vm_map_entry *next;

      vm_map_clip_end (map, entry, (offset + size));

      next = vm_map_entry_next (map, entry);
      remque (&entry->link);

      if (entry->submap_p)
        vm_map_release (entry->submap);
      else
        {
          /* Before releasing the object we change protection for the
             range.  */
          vm_map_pmap_protect (map, offset, entry->offset,
                               (entry->end - entry->start),
                               entry->object, 0);
          vm_object_destroy (entry->object);
        }
      vm_map_entry_free (map, entry);

      entry = next;
    }
  return KERN_SUCCESS;
}
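
/* Note (inferred from the callers): vm_map_delete assumes MAP is
   already locked; vm_map_deallocate below and vm_map_copyin at the
   bottom of this file both take the map lock around it.  */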

/* Deallocate the range [OFFSET, OFFSET+SIZE) from MAP.  */
kern_return_t
vm_map_deallocate (struct vm_map *map, vm_offset_t offset, vm_size_t size)
{
  kern_return_t kr;

  vm_map_lock (map);
  kr = vm_map_delete (map, offset, size);
  vm_map_unlock (map);
  return kr;
}
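
/* Illustrative: a region set up with vm_map_enter is torn down with

       kr = vm_map_deallocate (map, offset, size);

   which performs the locked delete above.  */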

/* Set the protection for the address region [START, START+SIZE) in
   MAP.  If SETMAX_P is specified, the maximum protection is set;
   otherwise, only the current protection is affected.  */
kern_return_t
vm_map_protect (struct vm_map *map, vm_offset_t start, vm_size_t size,
                vm_prot_t new_prot, int setmax_p)
{
  struct vm_map_entry *entry, *first_entry, *current;
  vm_offset_t end = start + size;

  vm_map_lock (map);

  if (! (entry = lookup_entry_in_map (map, start, &first_entry)))
    entry = vm_map_entry_next (map, first_entry);
  else
    vm_map_clip_start (map, entry, start);

  /* Make a first pass to check for protection violations.  */
  current = entry;
  while (current && current->start < end)
    {
      if (current->submap_p)
        {
          vm_map_unlock (map);
          return KERN_INVALID_ARGUMENT;
        }

      if ((new_prot & current->max_protection) != new_prot)
        {
          vm_map_unlock (map);
          return KERN_PROTECTION_FAILURE;
        }

      current = vm_map_entry_next (map, current);
    }

  /* Go back and fix up protections, clipping each entry against the
     end of the region as we go.  */
  current = entry;
  while (current && current->start < end)
    {
      vm_prot_t old_prot;

      vm_map_clip_end (map, current, end);

      old_prot = current->protection;
      if (setmax_p)
        current->protection =
          (current->max_protection = new_prot) & old_prot;
      else
        current->protection = new_prot;

      /* Update the physical map if necessary.  */
      if (current->protection != old_prot)
        {
          vm_map_pmap_protect (map, current->start, current->offset,
                               (current->end - current->start),
                               current->object, current->protection);
        }
      current = vm_map_entry_next (map, current);
    }

  vm_map_unlock (map);
  return KERN_SUCCESS;
}
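
/* Illustrative (hypothetical): write-protecting a region while
   leaving its maximum protection alone, assuming the usual
   VM_PROT_READ constant:

       kr = vm_map_protect (map, start, size, VM_PROT_READ, 0);

   With setmax_p nonzero the new value would instead become the
   maximum, and the current protection would be clamped against the
   old one.  */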

/* Copy the region [OFFSET, OFFSET+SIZE) into a holding map that is
   returned.  If DEALLOCATE_P is true we deallocate the region in
   SRC_MAP.  */
struct vm_map *
vm_map_copyin (struct vm_map *src_map, vm_offset_t offset, vm_size_t size,
               int deallocate_p)
{
  struct vm_map_entry *src_entry, *new_entry;
  struct vm_map *hold_map;

  struct vm_object *src_object;
  vm_size_t src_size;
  vm_offset_t src_offset;

  /* ??? */
  offset = vm_trunc_page (offset);
  size = vm_round_page (size);

  /* Allocate the holding map.  We do not create a pmap for this.  */
  hold_map = vm_map_create (0, 0, vm_round_page (size));
  if (! hold_map)
    return 0;

  /* We loop through the source map to find the starting entry.  */
  vm_map_lock (src_map);

  src_entry = lookup_entry_in_map (src_map, offset, 0);
  if (! src_entry)
    {
      vm_map_unlock (src_map);
      return 0;
    }
  vm_map_clip_start (src_map, src_entry, offset);

  while (src_entry)
    {
      /* Clip against the endpoints of the entire region.  */
      vm_map_clip_end (src_map, src_entry, (offset + size));

      src_size = src_entry->end - offset;
      src_object = src_entry->object;
      src_offset = src_entry->offset;

      /* Create a new address map entry to hold the result.  Fill in
         the fields from the appropriate source entries.  */
      new_entry = vm_map_entry_allocate (hold_map);
      vm_map_entry_copy (new_entry, src_entry);
      new_entry->copy_on_write_p = 0;

      /* Attempt non-blocking copy-on-write optimizations.  */
      if (deallocate_p && src_object != 0)
        {
          vm_object_reference (src_object);
          goto copy_successful;
        }

      /* Alright, since we are not deallocating the region we must do
         the full copy-on-write thing.  :-/ */
      new_entry->copy_on_write_p = 1;
      src_entry->copy_on_write_p = 1;

      /* If we have a source object we must prepare it for COW.  */
      if (src_object)
        {
          vm_object_prepare_delayed_copy (src_entry->object);
          vm_object_reference (src_entry->object);
        }

    copy_successful:
      /* Link in the new map entry.  */
      enqueue_tail (&hold_map->entries, &new_entry->link);

      if (src_entry->end >= (offset + size))
        break;
      src_entry = vm_map_entry_next (src_map, src_entry);
    }

  if (deallocate_p)
    vm_map_delete (src_map, offset, offset + size);

  vm_map_unlock (src_map);
  return hold_map;
}
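
/* Illustrative: moving a region between maps goes through the holding
   map, e.g.

       struct vm_map *hold = vm_map_copyin (src_map, offset, size, 1);

   With DEALLOCATE_P true the source range is deleted outright, so no
   copy-on-write setup is needed and the new entries simply keep their
   references to the source objects.  */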