Diff of /rtmk/vm-page.c

revision 1.7 by jrydberg, Mon Feb 25 18:30:39 2002 UTC → revision 1.8 by jrydberg, Wed Mar 6 00:55:01 2002 UTC
# Line 15 (v1.7) / Line 15 (v1.8)
 along with this program; if not, write to the Free Software
 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

-/* ??? remaining to do is to check all locking.  */
-
 #include "tm.h"
 #include "vm-page.h"
 #include "vm-object.h"
 #include "trace.h"
 #include "vm-slab.h"
 #include "host.h"
+#include "thread.h"

 /* This is true if we have initialized the resident pages module.  */
-
 bool vm_resident_pages_init = false;

-/* Array of available physical segments, and counter of
-   available physical segments.  */
-
-struct vm_physseg vm_physmem [MAX_VM_PHYSSEGS];
-unsigned int vm_physmem_count = 0;
-
-/* List of all free pages.  */
-static struct queue_entry page_free_list = queue_ctor (page_free_list);
-
-/* Lock for the free list.  */
-static spin_lock_t page_free_lock = SPIN_LOCK_INITIALIZER;
-
-/* Count of pages on the free list.  */
-static int page_free_count = 0;
-
-/* List of all active pages.  Used for swapping and so on.  */
-static struct queue_entry page_active_list = queue_ctor (page_active_list);
-
-/* Lock for the active list.  */
-static spin_lock_t page_active_lock = SPIN_LOCK_INITIALIZER;
-
 /* Size of page hash table.  */
 #define PAGE_HASH_TABLE_SIZE \
    ((2 * VM_PAGE_SIZE) / (sizeof (struct queue_entry *)))
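(For a sense of scale only: assuming 4 KB pages and 4-byte queue-entry pointers, neither of which is fixed by this file, PAGE_HASH_TABLE_SIZE works out to (2 * 4096) / 4 = 2048 buckets.)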
# Line 61 (v1.7) / Line 38 (v1.8)
   (((unsigned) OBJECT + (unsigned) vm_atop(OFFSET)) & PAGE_HASH_TABLE_MASK)

 /* Hash table for fast page lookup.  */
-static struct queue_entry page_hash_table [PAGE_HASH_TABLE_SIZE];
+static struct queue_entry vm_page_lookup_hash_table [PAGE_HASH_TABLE_SIZE];

 /* Spin lock (per hash table bucket).  */
-static spin_lock_t page_hash_lock [PAGE_HASH_TABLE_SIZE];
+static spin_lock_t vm_page_lookup_hash_lock [PAGE_HASH_TABLE_SIZE];

+/* Array of available physical segments, and counter of
+   available physical segments.  */
+struct vm_physseg vm_physmem [MAX_VM_PHYSSEGS];
+unsigned int vm_physmem_count = 0;
+
+/* Locking protocol:
+
+   There is one global lock (vm_page_queue_lock) for the free,
+   active and inactive lists.  This is a thread lock.
+
+   The hash buckets are protected with a per-bucket spin lock.
+   Interrupts have to be disabled when a bucket is altered.
+
+   The global lock is always the last lock to be taken.  */
+struct thread_lock vm_page_queue_lock;
+
+/* List of all free pages.  This is protected by the global lock.
+   We also maintain a counter of the number of free pages on the queue.  */
+struct queue_entry vm_page_free_queue = queue_ctor (vm_page_free_queue);
+int                vm_page_free_count;
+
+/* List of all active pages.  This is protected by the global lock.
+   We also maintain a counter of the number of pages on the queue.  */
+struct queue_entry vm_page_active_queue = queue_ctor (vm_page_active_queue);
+int                vm_page_active_count;
+
+/* List of all inactive pages.  This is protected by the global lock.
+   We also maintain a counter of the number of pages on the queue.  */
+struct queue_entry vm_page_inactive_queue
+                                      = queue_ctor (vm_page_inactive_queue);
+int                vm_page_inactive_count;
+
+/* List of free fictitious pages.  Protected by the global lock.
+   We also maintain a counter of the number of free pages on the queue.  */
+struct queue_entry vm_page_fictitious_free_queue
+                                = queue_ctor (vm_page_fictitious_free_queue);
+int                vm_page_fictitious_free_count;
+
+/* Template for initializing page structures.  The template is
+   itself initialized by the bootstrap function.  */
+static struct vm_page vm_page_template;
+
 /* Cache for VM page structures.  Might come handy.  */
 static struct kmem_cache *page_cache;
-static struct kmem_cache *fict_cache;

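To make the locking protocol above concrete, the sketch below shows how a routine that touches both a hash bucket and the global page queues would be expected to nest the primitives, honoring the rule that the global lock is taken last. The primitives (SPLOFF/SPLON, spin_lock, vm_page_lock_queues, queue_enter) are the ones used elsewhere in this revision; the helper itself is hypothetical and is not part of the file.

/* Hypothetical helper, for illustration only: enter PAGE into a hash
   bucket and then put it on the active queue, following the documented
   order (bucket spin lock with interrupts disabled first, global
   vm_page_queue_lock last).  */
static void
example_hash_then_activate (struct vm_page *page, int hash_index)
{
  SPL_T spl;

  spl = SPLOFF ();                                      /* interrupts off */
  spin_lock (& vm_page_lookup_hash_lock [hash_index]);  /* per-bucket lock */
  queue_enter (& vm_page_lookup_hash_table [hash_index], page,
               struct vm_page *, hashq);
  spin_unlock (& vm_page_lookup_hash_lock [hash_index]);
  SPLON (spl);

  vm_page_lock_queues ();                               /* global lock, taken last */
  queue_enter (& vm_page_active_queue, page, struct vm_page *, listq);
  page->active_p = 1;
  vm_page_active_count++;
  vm_page_unlock_queues ();
}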
-/* Initialize page stuff that is needed after the VM system is bootstrapped. */
+/* Initialize page stuff that is needed after the VM
+   system is bootstrapped. */
 void
 vm_page_module_init (void)
 {
   page_cache = kmem_cache_create ("page cache",
                                   sizeof (struct vm_page), 0);
-  fict_cache = kmem_cache_create ("fict cache",
-                                  sizeof (struct vm_page), 0);
-  assert (page_cache && fict_cache);
+  assert (page_cache);
 }

-
 /* Initialize all resident pages.  After this function is called
    the system can not steal more memory.  */
 void
 vm_page_resident_pages_init (vm_offset_t *vstartp, vm_offset_t *vendp)
 {
   int i, npages, resident_pages = 0, n;
-  struct vm_page *pages;
+  struct vm_page *pages, *p;
   struct vm_physseg *ps;

+  /* Initialize the global lock.  */
+  thread_lock_init (& vm_page_queue_lock, 1, 1);
+
   /* Before anything else we steal initial memory for the slab allocator.  */
   kmem_cache_bootstrap_data = PMAP_STEAL_MEMORY (kmem_cache_bootstrap_size);
   assert (kmem_cache_bootstrap_data);

+  /* Initialize the page structure template.  All fields
+     except the busy flag are null, so we optimize with memset.  */
+  p = & vm_page_template;
+  memset (p, 0, sizeof *p);
+  p->busy_p = 1;
+
   /* Initialize all page hash buckets.  */
   for (i = 0; i < PAGE_HASH_TABLE_SIZE; i++)
     {
-      queue_init (& page_hash_table [i]);
-      page_hash_lock [i] = SPIN_LOCK_INITIALIZER;
+      queue_init (& vm_page_lookup_hash_table [i]);
+      vm_page_lookup_hash_lock [i] = SPIN_LOCK_INITIALIZER;
     }

   /* Count number of available pages.  */
# Line 121 (v1.7) / Line 146 (v1.8)
         {
           assert (pa != 0);

-          pages [resident_pages++].phys_addr = pa;
+          vm_page_init (& pages[resident_pages++], pa);
           pa += VM_PAGE_SIZE;
         }
     }

-  /* ??? release pages.  */
   for (i = resident_pages; i >= 0; --i)
     {
       trace_count (n_vm_pages++);
# Line 145 (v1.7) / Line 169 (v1.8)
   vm_resident_pages_init = true;
 }

+
 /* Allocate memory at bootstrap time.  SIZE is rounded up to
    page size.  Return pointer to memory block (what else?).  */
-
 vm_offset_t
 vm_page_bootalloc (vm_size_t size)
 {
# Line 155 (v1.7) / Line 179 (v1.8)
   return PMAP_STEAL_MEMORY (size);
 }

-/* Initialize a new page structure. Used when bootstraping the system
-   and when memory have been released back to system after startup.
-   PAGE is the VM page structure that we will initialize.  PHYS_ADDR is
-   the physical address of the page.  */
-void
-vm_page_init (struct vm_page *page, void *phys_addr)
-{
-  page->phys_addr = (vm_offset_t) phys_addr;
-}
-
 /* Load [START, END) physical memory into the VM system,
    of which [AVAIL_START, AVAIL_END) is available.  */
 void
# Line 183 (v1.7) / Line 197 (v1.8)
   ps->avail_end   = avail_end;
 }

-/* Release PAGE.  Put it on the free list.  */
-void
-vm_page_release (struct vm_page *page)
-{
-  SPL_T spl;
-
-  if (page->fictitious_p)
-    {
-      kmem_cache_free (fict_cache, page);
-    }
-  else
-    {
-      trace_count (n_vm_free_pages++);
-      host_info_basic.free_pages++;
-
-      spl = SPLOFF ();
-      if (page->active_p)
-        {
-          spin_lock (&page_active_lock);
-          queue_remove (&page_active_list, page, struct vm_page *, listq);
-          spin_unlock (&page_active_lock);
-        }
-
-      page_free_count++;
-      spin_lock (&page_free_lock);
-      queue_enter (&page_free_list, page, struct vm_page *, listq);
-      spin_unlock (&page_free_lock);
-      SPLON (spl);
-
-      host_info_basic.free_pages++;
-    }
-}
+/* Initialize a new page structure.  Used when bootstrapping the system
+   and when memory has been released back to the system after startup.
+   PAGE is the VM page structure that we will initialize.  PHYS_ADDR is
+   the physical address of the page.  */
+void
+vm_page_init (struct vm_page *page, vm_offset_t phys_addr)
+{
+  *page = vm_page_template;
+  page->phys_addr = (vm_offset_t) phys_addr;
+}
+
+/* After the VM system is up, machine-dependent code
+   may stumble across more physical memory.  For example,
+   memory that it was reserving for a frame buffer.
+   vm_page_create turns this memory into available pages.  */
+void
+vm_page_create (vm_offset_t start, vm_offset_t end)
+{
+  struct vm_page *m;
+  vm_offset_t paddr;
+
+  for (paddr = vm_round_page (start);
+       paddr < vm_trunc_page (end);
+       paddr += VM_PAGE_SIZE)
+    {
+      m = (struct vm_page *) kmem_cache_alloc (page_cache);
+      if (m == 0)
+        panic("vm_page_create");
+
+      vm_page_init (m, paddr);
+      vm_page_release (m);
+    }
+}
+
+/* Return page MEM to the free list.  */
+void
+vm_page_release (struct vm_page *mem)
+{
+  if (mem->free_p)
+    panic ("vm_page_release");
+  mem->free_p = 1;
+
+  vm_page_lock_queues ();
+
+  queue_enter (&vm_page_free_queue, mem, struct vm_page *, listq);
+  vm_page_free_count++;
+
+  vm_page_unlock_queues ();
+
+#if 0
+  /* Check if we should wake up someone waiting for a page.
+     But don't bother waking them unless they can allocate.  */
+
+  if ((vm_page_free_wanted > 0) &&
+      (vm_vm_page_free_count >= vm_page_free_reserved))
+    {
+      vm_page_free_wanted--;
+      thread_wakeup_one((event_t) &vm_page_free_count);
+    }
+#endif
+}
+
+/* Returns the given page to the inactive list,
+   indicating that no physical maps have access.  */
+void
+vm_page_deactivate (struct vm_page *m)
+{
+  /* This page is no longer very interesting.  If it was
+     interesting (active or inactive/referenced), then we
+     clear the reference bit and (re)enter it in the
+     inactive queue.  */
+
+  if (m->active_p || (m->inactive_p && m->reference_p))
+    {
+#if 0
+      if (!m->fictitious_p && !m->absent_p)
+        pmap_clear_reference (m->phys_addr);
+#endif
+      m->reference_p = 0;
+      VM_PAGE_QUEUES_REMOVE (m);
+    }
+
+  if (! m->inactive_p)
+    {
+      vm_page_lock_queues ();
+      queue_enter (&vm_page_inactive_queue, m, struct vm_page *, listq);
+      m->inactive_p = 1;
+      vm_page_unlock_queues ();
+    }
+}
+
+/* Put the specified page on the active list.  */
+void
+vm_page_activate (struct vm_page *m)
+{
+  if (m->active_p)
+    panic ("vm_page_activate: already active");
+
+  vm_page_lock_queues ();
+  if (m->inactive_p)
+    {
+      queue_remove (& vm_page_inactive_queue, m, struct vm_page *, listq);
+      m->inactive_p = 0;
+    }
+
+  queue_enter(& vm_page_active_queue, m, struct vm_page *, listq);
+  m->active_p = 1;
+  vm_page_unlock_queues ();
+}
+
+/* Returns the given page to the free list, disassociating
+   it from any VM object.  Object must be locked prior to entry.  */
+void
+vm_page_free (struct vm_page *mem)
+{
+  if (mem->free_p)
+    panic("vm_page_free");
+
+  if (mem->tabled_p)
+    vm_page_remove (mem);
+  VM_PAGE_QUEUES_REMOVE(mem);
+
+  VM_PAGE_WAKEUP_DONE(mem);
+
+  if (! mem->fictitious_p)
+    {
+      vm_page_init (mem, mem->phys_addr);
+      vm_page_release (mem);
+    }
+  else
+    vm_page_release_fictitious (mem);
+}
+

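The vm_page_deactivate / vm_page_activate pair added above gives the pageout path a two-queue aging scheme. A hypothetical aging pass, which is not part of this revision, might drive it roughly as follows, using only the flags and functions introduced above.

/* Hypothetical aging pass, for illustration only: push an unreferenced
   active page toward the inactive queue, and pull a referenced inactive
   page back onto the active queue.  */
static void
example_age_page (struct vm_page *m)
{
  if (m->active_p && ! m->reference_p)
    vm_page_deactivate (m);      /* becomes a pageout candidate */
  else if (m->inactive_p && m->reference_p)
    vm_page_activate (m);        /* was touched again; keep it resident */
}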
 /* Allocate a new page from the free list.  This may start the
    pageout daemon if we go below the used-pages threshold.  */
 struct vm_page *
 vm_page_allocate (void)
 {
   struct vm_page *page;
-  SPL_T spl;

-  if (queue_empty (& page_free_list))
-    return 0;
-
-  spl = SPLOFF ();
-  spin_lock (&page_free_lock);
-  queue_remove_last (& page_free_list, page, struct vm_page *, listq);
-  spin_unlock (&page_free_lock);
-  page_free_count--;
-  spin_lock (&page_active_lock);
-  queue_enter (&page_active_list, page, struct vm_page *, listq);
-  spin_unlock (&page_active_lock);
-  SPLON (spl);
+  vm_page_lock_queues ();
+  if (queue_empty (& vm_page_free_queue))
+    {
+      vm_page_unlock_queues ();
+      return 0;
+    }
+
+  queue_remove_last (& vm_page_free_queue, page, struct vm_page *, listq);
+  vm_page_free_count--;
+
+  vm_page_unlock_queues ();
+
+  page->free_p = 0;

   host_info_basic.free_pages--;

# Line 246 (v1.7) / Line 361 (v1.8)
   return page;
 }

-/* Allocate a fictitious page.  We returned page have no
-   physical page assigned.  On failure NULL is returned.  */
+/* Remove a fictitious page from the free list.
+   Returns NULL if there are no free pages.  */
 struct vm_page *
-vm_page_fictitious_allocate (void)
+vm_page_grab_fictitious (void)
 {
-  struct vm_page *page;
+  struct vm_page *m = 0;

-  page = (struct vm_page *) kmem_cache_alloc (fict_cache);
-  if (page)
+  vm_page_lock_queues ();
+  if (! queue_empty (& vm_page_fictitious_free_queue))
     {
-      memset (page, 0, sizeof (struct vm_page));
-      page->fictitious_p = true;
+      queue_remove_first (& vm_page_fictitious_free_queue, m,
+                          struct vm_page *, listq);
+      vm_page_fictitious_free_count--;
+      vm_page_init (m, 0);
+      m->fictitious_p = 1;
     }
-  return page;
+  vm_page_unlock_queues ();
+  return m;
+}
+
+/* Release a fictitious page to the free list.  */
+void
+vm_page_release_fictitious (struct vm_page *m)
+{
+  if (m->free_p)
+    panic ("vm_page_release_fictitious");
+  m->free_p = 1;
+
+  vm_page_lock_queues ();
+  queue_enter (& vm_page_fictitious_free_queue, m, struct vm_page *, listq);
+  vm_page_fictitious_free_count++;
+  vm_page_unlock_queues ();
+}
+
+/* Add more fictitious pages to the free list.
+   Allowed to block.  */
+int vm_page_fictitious_quantum = 5;
+
+void
+vm_page_more_fictitious (void)
+{
+  struct vm_page *m;
+  int i;
+
+  for (i = 0; i < vm_page_fictitious_quantum; i++)
+    {
+      m = (struct vm_page *) kmem_cache_alloc (page_cache);
+      if (! m)
+        panic ("vm_page_more_fictitious");
+
+      vm_page_init (m, 0);
+      m->fictitious_p = 1;
+      vm_page_release_fictitious (m);
+    }
+}
+
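Callers that must not fail (for example a fault path that needs a placeholder page) would typically combine the two routines above: grab a fictitious page, and if the pool is empty, replenish it and retry. The helper below is a sketch of that pattern and is not code from this revision.

/* Hypothetical helper, for illustration only: never returns NULL.  */
static struct vm_page *
example_grab_fictitious_retry (void)
{
  struct vm_page *m;

  while ((m = vm_page_grab_fictitious ()) == 0)
    vm_page_more_fictitious ();   /* may block while refilling the pool */
  return m;
}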
+/* Attempt to convert fictitious page M into a real page.
+   Return true if we succeeded, otherwise return false.  */
+int
+vm_page_convert (struct vm_page *m)
+{
+  struct vm_page *real_m;
+
+  real_m = vm_page_allocate ();
+  if (real_m == 0)
+    return 0;
+
+  m->phys_addr = real_m->phys_addr;
+  m->fictitious_p = 0;
+
+  real_m->phys_addr = 0;
+  real_m->fictitious_p = 1;
+
+  vm_page_free (real_m);
+  return 1;
 }

 /* Assign a physical address, PA, to PAGE.  */
# Line 270 (v1.7) / Line 446 (v1.8)
   page->phys_addr = pa;
 }

-
 /* Grab a physical page.  The physical page address is returned.  */
 void *
 vm_page_grab_physical (void)
 {
-  struct vm_page *page;
-  SPL_T spl;
-
-  if (queue_empty (&page_free_list))
-    return 0;
-
-  spl = SPLOFF ();
-  spin_lock (&page_free_lock);
-  queue_remove_last (& page_free_list, page, struct vm_page *, listq);
-  spin_unlock (&page_free_lock);
-  SPLON (spl);
-
-  page_free_count--;
-  host_info_basic.free_pages--;
-
-  return (void *) page->phys_addr;
+  struct vm_page *page = vm_page_allocate ();
+  return page ? (void *) page->phys_addr : 0;
 }

 /* Copy page SRC_PAGE to DST_PAGE.  */
-
 void
 vm_page_copy (struct vm_page *dst_page, struct vm_page *src_page)
 {
# Line 303 (v1.7) / Line 463 (v1.8)
           VM_PAGE_SIZE);
 }

+/* Fill PAGE with zeros.  */
+void
+vm_page_zero_fill (struct vm_page *page)
+{
+  memset ((void *) page->phys_addr, 0, VM_PAGE_SIZE);
+}
+
 /* Insert PAGE into OBJECT at OFFSET.  PAGE can not be inserted in
    any other object.  */
# Line 313 (v1.7) / Line 479 (v1.8)
   int hash_index;
   SPL_T spl;

-  /* Check if page already have been tabled in another object.  */
-  if (page->tabled_p)
-    trace_printf ("tabled object is %p", page->object);
-
   assert (page->tabled_p == false);

   hash_index = PAGE_HASH_FN (object, offset);

-  spl = SPLOFF ();
-  spin_lock (& page_hash_lock [hash_index]);
-
   /* Insert entry in hash table.  */
-  queue_enter (& page_hash_table [hash_index], page, struct vm_page *, hashq);
+  spl = SPLOFF ();
+  spin_lock (& vm_page_lookup_hash_lock [hash_index]);
+  queue_enter (& vm_page_lookup_hash_table [hash_index], page, struct vm_page *, hashq);
+  spin_unlock (& vm_page_lookup_hash_lock [hash_index]);
+  SPLON (spl);

   page->tabled_p = true;
   page->object = object;
# Line 334 (v1.7) / Line 497 (v1.8)
   /* Insert page in object.  */
   queue_enter (& object->pageq, page, struct vm_page *, pageq);
   object->resident_page_cnt++;
+}

-  spin_unlock (& page_hash_lock [hash_index]);
-  SPLON (spl);
+/* Allocate and return a memory cell associated with this
+   VM object/offset pair.  Object must be locked.  */
+struct vm_page *
+vm_page_alloc (struct vm_object *object, vm_offset_t offset)
+{
+  struct vm_page *mem;
+
+  mem = vm_page_allocate ();
+  if (mem == 0)
+    return 0;
+
+  vm_page_insert(mem, object, offset);
+  return mem;
 }

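A typical caller of vm_page_alloc would be a fault or pager path that needs a fresh, zeroed page resident in an object. Assuming the caller already holds the object lock, as the comment above requires, the pattern looks roughly like the hypothetical helper below; it only uses routines defined in this revision.

/* Hypothetical call site, for illustration only: OBJECT must already
   be locked by the caller, as vm_page_alloc requires.  */
static struct vm_page *
example_get_zeroed_page (struct vm_object *object, vm_offset_t offset)
{
  struct vm_page *mem;

  mem = vm_page_alloc (object, offset);   /* allocate and insert at OFFSET */
  if (mem == 0)
    return 0;                             /* out of free pages */

  vm_page_zero_fill (mem);                /* clear the new frame */
  vm_page_activate (mem);                 /* put it on the active queue */
  return mem;
}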
 /* Exactly as vm_page_insert, except that we remove the page from
# Line 361 (v1.7) / Line 536 (v1.8)
   hash_index = PAGE_HASH_FN (object, offset);

   spl = SPLOFF ();
-  spin_lock (& page_hash_lock [hash_index]);
-
+  spin_lock (& vm_page_lookup_hash_lock [hash_index]);
   /* Loop through all entries in hash bucket and compare <OBJECT, OFFSET>.  */
-  queue_iterate (& page_hash_table [hash_index], page, struct vm_page *, hashq)
+  queue_iterate (& vm_page_lookup_hash_table [hash_index], page,
+                 struct vm_page *, hashq)
     {
       if (page->object == object && page->offset == offset)
         {
-          spin_unlock (& page_hash_lock [hash_index]);
+          spin_unlock (& vm_page_lookup_hash_lock [hash_index]);
           SPLON (spl);
-
           return page;
         }
     }
-  spin_unlock (& page_hash_lock [hash_index]);
+  spin_unlock (& vm_page_lookup_hash_lock [hash_index]);
   SPLON (spl);
-
   return 0;
 }

# Line 392 (v1.7) / Line 564 (v1.8)
   assert (page->tabled_p == true);

   hash_index = PAGE_HASH_FN (page->object, page->offset);
-
-  spl = SPLOFF ();
-  spin_lock (& page_hash_lock [hash_index]);

   /* Remove PAGE from hash bucket.  */
-  queue_remove (& page_hash_table [hash_index], page, struct vm_page *, hashq);
+  spl = SPLOFF ();
+  spin_lock (& vm_page_lookup_hash_lock [hash_index]);
+  queue_remove (& vm_page_lookup_hash_table [hash_index], page, struct vm_page *, hashq);
+  spin_unlock (& vm_page_lookup_hash_lock [hash_index]);
+  SPLON (spl);
   page->tabled_p = false;

   /* Remove PAGE from object.  */
   queue_remove (& page->object->pageq, page, struct vm_page *, pageq);
   page->object->resident_page_cnt--;
-
-  spin_unlock (& page_hash_lock [hash_index]);
-  SPLON (spl);
 }


Legend: lines prefixed with "-" were removed from v.1.7; lines prefixed with "+" were added in v.1.8; unprefixed lines are unchanged context.
