Diff of /rtmk/vm-kmem.c


revision 1.3 by jrydberg, Sat Dec 8 00:57:18 2001 UTC
revision 1.4 by jrydberg, Sun Dec 9 20:48:17 2001 UTC
# Line 25 (v1.3) / Line 25 (v1.4):  Foundation, Inc., 59 Temple Place - Suit

 #include "vm-page.h"
 #include "trace.h"
 
+#ifndef NBUCKETS
+# define NBUCKETS 7
+#endif
+
+#define MINSHIFT  4
+
+/* True if the buckets have been initialized.  */
+static bool malloc_buckets_initialized = false;
+
+/* Structure describing a malloc'ed memory block.  */
+struct malloc_block
+{
+  struct queue_entry *bucket;
+  char data [0];
+};
+
+/* Malloc memory buckets.  */
+
+static struct queue_entry malloc_buckets [NBUCKETS];
+
+/* Convenience macro for returning the size of bucket I.  */
+
+#define bucket_size(i) (1 << ((i) + MINSHIFT))
+
+
 /* Insert pages into OBJECT at [OFFSET, OFFSET+SIZE) with protection
    PROTECTION.  Returns address of memory block.  */
 
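For concreteness: with the values introduced above (NBUCKETS 7, MINSHIFT 4), bucket_size yields seven power-of-two size classes, from 16 bytes for bucket 0 up to 1024 bytes for bucket 6. The stand-alone sketch below is not part of the revision; it assumes a 4096-byte page as a stand-in for VM_PAGE_SIZE and prints each class together with the number of blocks a single page is carved into.

#include <stdio.h>

#define NBUCKETS   7
#define MINSHIFT   4
#define PAGE_BYTES 4096        /* assumed stand-in for VM_PAGE_SIZE */

#define bucket_size(i) (1 << ((i) + MINSHIFT))

int
main (void)
{
  int i;

  /* Bucket 0 holds 16-byte blocks, bucket 6 holds 1024-byte blocks.  */
  for (i = 0; i < NBUCKETS; i++)
    printf ("bucket %d: %4d bytes, %3d blocks per page\n",
            i, bucket_size (i), PAGE_BYTES / bucket_size (i));
  return 0;
}

With these numbers a refill of bucket 0 chains 256 free blocks onto its queue, while a refill of bucket 6 yields only 4.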
# Line 73 (v1.3) / Line 98 (v1.4):  map_pages (vm_offset_t offset, vm_size_t

 void
 vm_kmem_init (vm_offset_t virtual_start, vm_offset_t virtual_end)
 {
+  int i;
+
   if (virtual_start != virtual_end && virtual_start != VM_KERN_MIN_ADDRESS)
     {
       kern_return_t kr;

# Line 87 (v1.3) / Line 114 (v1.4):  vm_kmem_init (vm_offset_t virtual_start,

                          VM_PROT_ALL, VM_PROT_ALL, VM_INHERIT_NONE);
       assert (kr == KERN_SUCCESS);
     }
+
+  for (i = 0; i < NBUCKETS; i++)
+    queue_init (& malloc_buckets [i]);
+  malloc_buckets_initialized = true;
 }
 
 
# Line 157 (v1.3) / Line 188 (v1.4):  kmem_map (struct vm_map *map, vm_size_t

 
   return map_pages (offset, size, pa, VM_PROT_ALL);
 }
 
 
+/* Return the bucket number for a memory block of SIZE.
+   Returns -1 if no bucket is large enough.  */
+
+static inline int bucket_number (vm_size_t size)
+{
+  int cnt, pos = 1 << MINSHIFT;
+
+  for (cnt = 0; cnt < NBUCKETS; cnt++)
+    {
+      if (pos >= size)
+        return cnt;
+      pos <<= 1;
+    }
+  return -1;
+}
+
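To illustrate the mapping, the sketch below reimplements the same loop outside the kernel and checks a few request sizes. The HEADER constant models sizeof (struct malloc_block) as one 4-byte pointer, which is an assumption about the target; the real value depends on the pointer size.

#include <assert.h>
#include <stddef.h>

#define NBUCKETS  7
#define MINSHIFT  4

/* Same algorithm as bucket_number above.  */
static int
bucket_number (size_t size)
{
  size_t pos = 1 << MINSHIFT;
  int cnt;

  for (cnt = 0; cnt < NBUCKETS; cnt++)
    {
      if (pos >= size)
        return cnt;
      pos <<= 1;
    }
  return -1;
}

int
main (void)
{
  const size_t HEADER = 4;   /* assumed sizeof (struct malloc_block) */

  assert (bucket_number (12 + HEADER) == 0);     /* fits the 16-byte class    */
  assert (bucket_number (13 + HEADER) == 1);     /* spills into 32 bytes      */
  assert (bucket_number (1020 + HEADER) == 6);   /* largest, 1024-byte class  */
  assert (bucket_number (1021 + HEADER) == -1);  /* too big for any bucket    */
  return 0;
}

The header overhead, not just the requested size, decides the class; this is why kmem_malloc1 below passes size + sizeof (struct malloc_block) to bucket_number rather than size alone.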
+/* Allocate SIZE bytes.  Zero-fill the block if ZERO_P is true.
+   SIZE must be small enough to fit in the largest bucket.  */
+
+void *
+kmem_malloc1 (vm_size_t size, bool zero_p)
+{
+  struct queue_entry *bucket;
+  struct malloc_block *block;
+  int cnt, bucket_nr, total;
+
+  assert (malloc_buckets_initialized);
+
+  total = size + sizeof (struct malloc_block);
+
+  bucket_nr = bucket_number (total);
+  assert (bucket_nr >= 0);
+
+  bucket = &malloc_buckets [bucket_nr];
+
+  /* Check if we have to allocate more memory for this bucket.  */
+
+  if (queue_empty (bucket))
+    {
+      struct queue_entry *entry;
+      vm_offset_t memory;
+
+      memory = kmem_alloc_wired (VM_MAP_KERNEL (), VM_PAGE_SIZE);
+      assert (memory);
+
+      /* Loop through the entries and enqueue them to the bucket.  */
+
+      for (cnt = VM_PAGE_SIZE / bucket_size (bucket_nr); cnt; )
+        {
+          entry = (struct queue_entry *) memory;
+          memory = memory + bucket_size (bucket_nr);
+          enqueue (bucket, entry);
+          cnt --;
+        }
+    }
+
+  block = (struct malloc_block *) dequeue (bucket);
+  block->bucket = bucket;
+
+  if (zero_p)
+    memset (block->data, 0, size);
+
+  return (void *) block->data;
+}
+
+
+/* Free memory that was allocated with kmem_malloc or kmem_zalloc.  */
+
+void
+kmem_mfree (void *data)
+{
+  struct malloc_block *block;
+
+  block = (struct malloc_block *) ((vm_offset_t) data - sizeof *block);
+  enqueue (block->bucket, (struct queue_entry *) block);
+}
+

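To see the whole scheme in one place, here is a small user-space model of the same design. It is not rtmk code: malloc(3) stands in for kmem_alloc_wired, a singly linked list stands in for the queue_entry/enqueue/dequeue machinery, and the model_* names are invented for this sketch. The structure mirrors the diff above: per-class freelists, a one-pointer header recording the owning bucket, and a free routine that steps back over that header.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define NBUCKETS   7
#define MINSHIFT   4
#define PAGE_BYTES 4096                         /* stand-in for VM_PAGE_SIZE */
#define bucket_size(i) (1 << ((i) + MINSHIFT))

/* A singly linked freelist standing in for rtmk's queue machinery.  */
struct free_entry { struct free_entry *next; };
struct bucket     { struct free_entry *head; };

/* Block header, as in struct malloc_block: owning bucket, then the data.  */
struct block { struct bucket *bucket; char data[]; };

static struct bucket buckets [NBUCKETS];

/* Same size-to-bucket mapping as bucket_number above.  */
static int
bucket_number (size_t size)
{
  size_t pos = 1 << MINSHIFT;
  int cnt;

  for (cnt = 0; cnt < NBUCKETS; cnt++, pos <<= 1)
    if (pos >= size)
      return cnt;
  return -1;
}

/* Model of kmem_malloc1: refill the freelist from a fresh "page" when it
   is empty, then hand out one block and record its bucket in the header.  */
static void *
model_malloc (size_t size, int zero_p)
{
  int nr = bucket_number (size + sizeof (struct block));
  struct bucket *b;
  struct block *blk;

  assert (nr >= 0);
  b = &buckets [nr];

  if (b->head == NULL)
    {
      char *page = malloc (PAGE_BYTES);
      int cnt;

      assert (page != NULL);
      for (cnt = 0; cnt < PAGE_BYTES / bucket_size (nr); cnt++)
        {
          struct free_entry *e
            = (struct free_entry *) (page + cnt * bucket_size (nr));
          e->next = b->head;
          b->head = e;
        }
    }

  blk = (struct block *) b->head;
  b->head = b->head->next;

  blk->bucket = b;
  if (zero_p)
    memset (blk->data, 0, size);
  return blk->data;
}

/* Model of kmem_mfree: step back over the header and push the block onto
   the freelist of the bucket it came from.  */
static void
model_free (void *data)
{
  struct block *blk = (struct block *) ((char *) data - sizeof *blk);
  struct bucket *b = blk->bucket;   /* save before the header is reused */
  struct free_entry *e = (struct free_entry *) blk;

  e->next = b->head;
  b->head = e;
}

int
main (void)
{
  char *p = model_malloc (100, 1);  /* header + 100 bytes -> 128-byte class */

  strcpy (p, "kmem bucket demo");
  model_free (p);
  return 0;
}

One detail the model makes explicit: the owning bucket must be read out of the header before the block is threaded back onto the freelist, because the freelist linkage reuses the leading bytes of the block. The rtmk code gets the same effect because block->bucket is evaluated as an argument before enqueue runs, assuming the queue links live at the start of the entry as the overlap of struct malloc_block and struct queue_entry suggests.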
Legend:
Lines prefixed with '+' were added in v.1.4; unprefixed lines are unchanged context present in both revisions.
'# Line N (v1.3) / Line M (v1.4)' markers show where runs of unchanged lines have been elided.
