        \end{verbatim}

        \begin{description}
        \item[mmap] \index{mmap} A linked list of VMAs belonging to this address
          space, sorted by address.
        \item[mm\_rb] \index{mm\_rb} When the number of VMAs increases beyond a
          certain threshold, a red-black tree is also used to access them.
          mm\_rb points to the root node.
        \item[mmap\_cache] \index{mmap\_cache} Points to the last VMA accessed.
        \item[pgd] \index{pgd} Points to the Page Global Directory of the process.
        \item[mm\_users] \index{mm\_users} Number of processes sharing this structure.
        \item[mm\_count] \index{mm\_count} Number of non-user references to it, plus 1
          for all the users together.
        \item[map\_count] \index{map\_count} Number of VMAs.
        \item[mmap\_sem] \index{mmap\_sem} Semaphore used to serialize access to
          this structure.
        \item[page\_table\_lock] \index{page\_table\_lock} Protects the page tables and
          the rss field from concurrent access.
        \item[mmlist] \index{mmlist} List of all active mm's. These are globally strung
          together off init\_mm.mmlist and are protected by mmlist\_lock.
        \item[start\_code] \index{start\_code} Points to the starting address of the
          code section.
        \item[end\_code] \index{end\_code} Points to the end address of the code section.
        \item[start\_data] \index{start\_data} Points to the starting address of the
          data section.
        \item[end\_data] \index{end\_data} Points to the end address of the data section.
        \item[start\_brk] \index{start\_brk} Points to the start address of the heap area.
        \item[brk] \index{brk} Points to the end address of the heap area.
        \item[start\_stack] \index{start\_stack} Points to the start address of the stack.
        \item[arg\_start] \index{arg\_start} Points to the start address of the arguments.
        \item[arg\_end] \index{arg\_end} Points to the end address of the arguments.
        \item[env\_start] \index{env\_start} Points to the start address of the environment.
        \item[env\_end] \index{env\_end} Points to the end address of the environment.
        \item[rss] \index{rss} Number of pages currently in memory.
        \item[total\_vm] \index{total\_vm} Total number of pages used by this process.
        \item[locked\_vm] \index{locked\_vm} Number of pages locked by this process
          (i.e.\ unswappable pages).
        \item[def\_flags] \index{def\_flags} The default flags for this address space.
        \item[cpu\_vm\_mask] \index{cpu\_vm\_mask} A mask used to keep track of all
          the CPUs accessing this mm (and which may have TLB entries for it). Used for
          TLB shootdown.
        \item[swap\_address] \index{swap\_address} Used to store the last address
          swapped to disk. Set in \texttt{swap\_out\_pmd} and used by
          \texttt{swap\_out\_mm} to find the VMA being swapped out.
        \item[dumpable] \index{dumpable} This bit is used as a flag which controls
          whether a core dump may be created.
        \item[context] \index{context} Used to store segment information.
        \end{description}
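        To make the distinction between \textit{mm\_users} and \textit{mm\_count}
        more concrete, the sketch below shows how the two counters are typically
        manipulated. This is a simplified illustration in 2.4 style, not a verbatim
        kernel excerpt.
        \begin{verbatim}
        /* A task created with CLONE_VM simply becomes another user of
         * its parent's address space: */
        atomic_inc(&mm->mm_users);

        /* When a user goes away, mmput() decrements mm_users; the last
         * user tears down the mappings and then drops the single
         * mm_count reference held on behalf of all the users. */
        mmput(mm);

        /* Code that only needs the mm_struct itself and not the
         * mappings (for example the "lazy TLB" reference kept in
         * active_mm) manipulates mm_count instead: */
        atomic_inc(&mm->mm_count);
        mmdrop(mm);    /* frees the mm_struct when mm_count reaches 0 */
        \end{verbatim}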

\subsection{struct vm\_area\_struct}

        \begin{description}
        \item[vm\_mm] The address space we belong to.
        \item[vm\_start] Our start address within vm\_mm.
        \item[vm\_end] The first byte after our end address within vm\_mm.
        \item[vm\_next] Points to the next VMA in the address space's linked list,
          which is sorted by address.
        \item[vm\_page\_prot] Access permissions of this VMA.
        \item[vm\_flags] Various flags describing this memory area.
        \item[vm\_rb] The node linking this VMA into the red-black tree that is used
          for faster access when the VMAs grow numerous.
        \item[vm\_next\_share] If this VMA is mapping a file, this field points to
          another VMA (of a different process) mapping (sharing) the same part of the
          file.
        \item[vm\_pprev\_share] Same function as above, but points to the previous node
          in the list.
        \item[vm\_ops] A set of functions to act on this memory region.
        \item[vm\_pgoff] If we are mapping a file, this field gives the offset within
          the file that this region maps, in terms of number of pages.
        \item[vm\_file] If this memory region is mapping a file, this pointer is used
          to point to it (can be NULL).
        \item[vm\_raend] Stores the file offset (from \textit{vm\_pgoff}) up to which
          data will be read in the next read-ahead operation.
        \item[vm\_private\_data] Used by drivers to store their own data.
        \end{description}
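        To illustrate how the \textit{vm\_next} list and the red-black tree are used
        together, the following sketch looks up the VMA containing a given address.
        \texttt{lookup\_vma} is a hypothetical helper written for this text;
        \texttt{find\_vma()} is the real kernel routine.
        \begin{verbatim}
        struct vm_area_struct *lookup_vma(struct mm_struct *mm,
                                          unsigned long addr)
        {
            struct vm_area_struct *vma;

            /* Walk the address-ordered list headed by mm->mmap ... */
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (addr < vma->vm_end)
                    break;

            /* ... or let find_vma() use mmap_cache and the rb-tree. */
            vma = find_vma(mm, addr);

            /* Either way, vma contains addr only if vm_start <= addr. */
            if (vma && vma->vm_start <= addr)
                return vma;
            return NULL;
        }
        \end{verbatim}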

        Briefly, these counters are used as follows:

        \begin{description}
        \item[maj\_flt] Counts the number of major page faults (i.e.\ whenever a page
          had to be loaded from the swap).
        \item[cmin\_flt] Counts the number of minor page faults of its children.
        \item[cmaj\_flt] Counts the number of major page faults of its children.
        \item[nswap] Not used or updated anywhere; dead code.
        \item[cnswap] Not used or updated anywhere; dead code.
        \end{description}
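        These counters are updated from the page fault path. Roughly, the i386 fault
        handler acts on the return value of \texttt{handle\_mm\_fault()} as sketched
        below; this is a simplified outline, not a verbatim excerpt of
        \texttt{do\_page\_fault()}.
        \begin{verbatim}
        switch (handle_mm_fault(mm, vma, address, write)) {
        case 1:
            tsk->min_flt++;   /* minor fault: no disk I/O was needed */
            break;
        case 2:
            tsk->maj_flt++;   /* major fault: page read from disk/swap */
            break;
        default:
            /* out of memory or an unrecoverable fault */
            break;
        }
        \end{verbatim}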

\subsection{Function copy\_mm()}

        \begin{verbatim}

                goto fail_nomem;

        \end{verbatim}
        Next we copy the parent's mm\_struct to the newly created descriptor. Then we
        initialize some of its fields by calling \texttt{mm\_init()}, which is discussed
        further in section~\ref{fun5:mi}.
        \begin{verbatim}

            if (init_new_context(tsk,mm))
                goto free_pt;

        \end{verbatim}
        Then we call \texttt{dup\_mmap()} to initialize the rest of the fields and also
        copy the memory region descriptors (vm\_area\_struct). It is covered in
        section~\ref{fun5:dupm}.
        \begin{verbatim}

            copy_segments(tsk, mm);

        \end{verbatim}
        If the parent task has an LDT (Local Descriptor Table), it is copied to the new
        memory descriptor.
        \begin{verbatim}

        good_mm:
            return 0;

        \end{verbatim}
        We come here when the CLONE\_VM flag is set. We just point to (use) the same
        memory descriptor as the parent.
        \begin{verbatim}

        free_pt:

        \end{verbatim}

\subsection{Function dup\_mmap()}

        \begin{verbatim}
        int dup_mmap(struct mm_struct * mm)
        \end{verbatim}
        This function is called to initialize some fields and the memory region
        descriptors of an mm\_struct.
        \begin{verbatim}
        struct vm_area_struct * mpnt, *tmp, **pprev;
        int retval;

        flush_cache_mm(current->mm);

        \end{verbatim}
        This function flushes all pages belonging to the given \textit{mm} from the
        cache. It is a no-op on the i386.
        \begin{verbatim}

        mm->locked_vm = 0;

        pprev = &mm->mmap;

        \end{verbatim}
        Basic initialization.
        \begin{verbatim}

        spin_unlock(&mmlist_lock);

        \end{verbatim}
        We add this new structure to the global list of address spaces, immediately
        after its parent's address space. Then we increment the \textit{mmlist\_nr}
        counter, which keeps track of the number of address spaces in the list. Access
        to this list is protected by \texttt{mmlist\_lock}.
        \begin{verbatim}

        for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
            if (mpnt->vm_flags & VM_DONTCOPY)
                continue;

        \end{verbatim}
        Next we go through the list of VMAs of the parent process and duplicate them in
        the child's address space.

        First we check whether the VMA has the VM\_DONTCOPY flag set, which protects it
        from being copied. If it does, we skip this VMA and continue with the next one.
        \begin{verbatim}

            tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
            if (!tmp)
                goto fail_nomem;

        \end{verbatim}
        We get a new \texttt{vm\_area\_struct} from the slab cache.
        \begin{verbatim}

            *tmp = *mpnt;
            tmp->vm_flags &= ~VM_LOCKED;
            tmp->vm_mm = mm;
            tmp->vm_next = NULL;

        \end{verbatim}
        We copy the parent's VMA into the child's newly allocated VMA. Then we clear
        the VM\_LOCKED flag of the child, initialize its \textit{vm\_mm} field to point
        to the child's address space, and set \textit{vm\_next} to NULL (as it may be
        the last node in the list).
        \begin{verbatim}

            file = tmp->vm_file;
            if (file) {
                struct inode *inode = file->f_dentry->d_inode;
                get_file(file);
                if (tmp->vm_flags & VM_DENYWRITE)
                    atomic_dec(&inode->i_writecount);

        \end{verbatim}
        If the VMA we are copying was mapping a file, the file-related fields must also
        be initialized. After we confirm that we are indeed mapping a file, we get a
        reference to its inode. We then call \texttt{get\_file()} on the \textit{file}
        to increment its count of the number of mappings.

        Simultaneous read-write and read-only support is not available at the moment.
        So if the VM\_DENYWRITE flag is set, it is a read-only mapping, else it is
        read-write. The number of readers or writers on the file mapping is tracked by
        the inode's \textit{i\_writecount} field. If it is a read-only mapping, its
        value is decremented, else it is incremented. So by looking at
        \textit{i\_writecount} we can tell whether the mapping is read-only (negative)
        or read-write (positive).
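        The sign convention can be pictured with a small helper. This is a hypothetical
        function written for this text (\texttt{mapping\_denies\_write} is not a kernel
        routine); only the use of \textit{i\_writecount} is taken from the kernel.
        \begin{verbatim}
        /* Returns non-zero if VM_DENYWRITE mappings of the file exist,
         * i.e. write access to the file should currently be refused. */
        static int mapping_denies_write(struct inode *inode)
        {
            return atomic_read(&inode->i_writecount) < 0;
        }
        \end{verbatim}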
        \begin{verbatim}

            /* insert tmp into the share list, just after mpnt */
            }

            mm->map_count++;

        \end{verbatim}
        If the new VMA maps a file, it is also inserted into the file's share list,
        just after the parent's VMA. We then add the VMA to the child's mmap list and
        increment the \textit{map\_count} counter.
        \begin{verbatim}

            retval = copy_page_range(mm, current->mm, tmp);
            spin_unlock(&mm->page_table_lock);

        \end{verbatim}
        Next we call \texttt{copy\_page\_range()} to copy the page table entries.
        \begin{verbatim}

            if (tmp->vm_ops && tmp->vm_ops->open)
                tmp->vm_ops->open(tmp);
        }

        \end{verbatim}
        If there is an open() function defined for this memory region (to perform any
        initializations), we call it.
        \begin{verbatim}

        retval = 0;
        build_mmap_rb(mm);

        \end{verbatim}
        Next we call \texttt{build\_mmap\_rb()}, which builds a red-black tree of the
        VMAs for faster searches.
        \begin{verbatim}

fail_nomem:
        flush_tlb_mm(current->mm);
        return retval;
        \end{verbatim}
        Then we flush the TLB.


\subsection{Function exit\_mm()}

        \begin{verbatim}
        void exit_mm(struct task_struct * tsk)
        void __exit_mm(struct task_struct * tsk)
        \end{verbatim}
        This function is called from \texttt{do\_exit()} whenever a process exits, to
        delete its address space.
        \begin{verbatim}
        struct mm_struct * mm = tsk->mm;

        mm_release();

        \end{verbatim}
        The function \texttt{mm\_release()} is only called to notify the parent about
        the death of its child, if the child was created via \textit{vfork()}.
        \begin{verbatim}

        if (mm) {
            atomic_inc(&mm->mm_count);
            BUG_ON(mm != tsk->active_mm);

        \end{verbatim}
        We check that \textit{mm} is still valid (not yet dropped) and then increment
        its \textit{mm\_count} to stop it from being dropped from under us. Also,
        \textit{mm} and \textit{active\_mm} need to be the same.
        \begin{verbatim}

        /* more a memory barrier than a real lock */
            task_lock(tsk);
            tsk->mm = NULL;
            task_unlock(tsk);
            enter_lazy_tlb(mm, current, smp_processor_id());

        \end{verbatim}
        Since we are about to modify the task structure, we take a lock on it. Then we
        remove the reference to \textit{mm} from the task structure. After unlocking
        the task struct, \texttt{enter\_lazy\_tlb()} is called, which is a no-op on a
        uniprocessor.
        \begin{verbatim}

            mmput(mm);
        }
        \end{verbatim}
        Finally, \texttt{mmput()} is called to drop this task's reference and, if it
        was the last user, destroy the \textit{mm\_struct}.


\subsection{Function mmput()}

        \begin{verbatim}
        void mmput(struct mm_struct *mm)
        \end{verbatim}
        This function is used to de-allocate the various resources held by the
        \texttt{mm\_struct} and then drop it.
        \begin{verbatim}

        if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {

        \end{verbatim}
        We can drop an \texttt{mm\_struct} only when its last user goes away. So the
        above line decrements \textit{mm\_users} and, if the count reaches 0, takes
        \textit{mmlist\_lock} and enters the block.
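        The behaviour of \texttt{atomic\_dec\_and\_lock()} is roughly equivalent to the
        sketch below; the real implementation is optimized to avoid taking the lock
        when the counter does not reach zero, so treat this only as an illustration.
        \begin{verbatim}
        /* Sketch: decrement the counter; if it reaches 0, return 1 with
         * the spinlock held, otherwise return 0 with the lock released. */
        static int sketch_atomic_dec_and_lock(atomic_t *count,
                                              spinlock_t *lock)
        {
            spin_lock(lock);
            if (atomic_dec_and_test(count))
                return 1;
            spin_unlock(lock);
            return 0;
        }
        \end{verbatim}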
        \begin{verbatim}

            extern struct mm_struct *swap_mm;
            if (swap_mm == mm)
                swap_mm = list_entry(mm->mmlist.next,
                                     struct mm_struct, mmlist);

        \end{verbatim}
        The global \texttt{swap\_mm} points to the \textit{mm\_struct} that is going to
        be swapped out next. In the above code we test whether \textit{swap\_mm} is the
        same \textit{mm} we are dropping. If it is, we update \textit{swap\_mm} to
        point to the next \textit{mm} on the \textit{mmlist}.
        \begin{verbatim}

            list_del(&mm->mmlist);
            mmlist_nr--;
            spin_unlock(&mmlist_lock);

        \end{verbatim}
        Next we remove the mm\_struct from the global \textit{mmlist}, decrement the
        \textit{mmlist\_nr} counter and release \textit{mmlist\_lock}, which was taken
        earlier by \texttt{atomic\_dec\_and\_lock()}.
        \begin{verbatim}

            exit_mmap(mm);

        \end{verbatim}
        We call \texttt{exit\_mmap()} to do the actual release of all the memory.
        \begin{verbatim}

            mmdrop(mm);
        }
        \end{verbatim}
        Lastly, \texttt{mmdrop()} is called to release the \textit{mm\_struct} back to
        the slab allocator.
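        For reference, \texttt{mmdrop()} itself is essentially a small wrapper around
        the \textit{mm\_count} reference count; the following is a simplified sketch of
        what it does, not necessarily the exact kernel source.
        \begin{verbatim}
        /* Drop one mm_count reference; free the mm_struct (and its page
         * directory) once the last reference goes away. */
        static inline void mmdrop(struct mm_struct *mm)
        {
            if (atomic_dec_and_test(&mm->mm_count))
                __mmdrop(mm);
        }
        \end{verbatim}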


\subsection{Function exit\_mmap()}

        \begin{verbatim}
        void exit_mmap(struct mm_struct * mm)
        \end{verbatim}
        This function does all the grunt work of releasing the resources held by the
        given \textit{mm\_struct}.
        \begin{verbatim}
        struct vm_area_struct * mpnt;

        release_segments(mm);

        \end{verbatim}
        If this address space had an associated LDT, it is freed.
        \begin{verbatim}

        spin_lock(&mm->page_table_lock);
        mpnt = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
        spin_unlock(&mm->page_table_lock);
        mm->total_vm = 0;
        mm->locked_vm = 0;

        \end{verbatim}
        Next we reset most of the fields (probably because the structure will be
        re-used by the slab allocator).
        \begin{verbatim}

        flush_cache_mm(mm);

        \end{verbatim}
        The above function is called to flush the caches (L1 and L2). On the i386 it is
        a no-op.
        \begin{verbatim}

        while (mpnt) {
            struct vm_area_struct * next = mpnt->vm_next;
            unsigned long start = mpnt->vm_start;
            unsigned long end = mpnt->vm_end;
            unsigned long size = end - start;

        \end{verbatim}
        Then we start going through each of the VMAs.
        \begin{verbatim}

            if (mpnt->vm_ops) {
                if (mpnt->vm_ops->close)
                    mpnt->vm_ops->close(mpnt);
            }

        \end{verbatim}
        If there is a \textit{vm\_ops} defined, we call the close operation on the
        memory region.
        \begin{verbatim}

            mm->map_count--;
            remove_shared_vm_struct(mpnt);
            zap_page_range(mm, start, size);

        \end{verbatim}
        We decrement the VMA counter \textit{map\_count} and remove the VMA from the
        list of shared mappings if it is mapping a file. Then the call to
        \texttt{zap\_page\_range()} removes all the page table entries covered by this
        VMA.
        \begin{verbatim}

            if (mpnt->vm_file)
                fput(mpnt->vm_file);
            kmem_cache_free(vm_area_cachep, mpnt);
            mpnt = next;
        }

        \end{verbatim}
        If we were mapping a file, \texttt{fput()} is called to decrement the file's
        user count and, if it reaches 0, drop the file structure. Then we release the
        VMA to the slab allocator and continue with the rest of the VMAs.
        \begin{verbatim}

        flush_tlb_mm(mm);

        \end{verbatim}
        Then we flush the TLB entries for this address space.
        \begin{verbatim}

        /* This is just debugging */
        if (mm->map_count)
            BUG();

        clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
        \end{verbatim}
        Lastly, all the page directory and page middle directory entries are cleared.