/[nova]/nova/kern/io-buf.c

Diff of /nova/kern/io-buf.c


revision 1.1.1.1 by jrydberg, Tue Feb 12 19:28:50 2002 UTC  →  revision 1.2 by jrydberg, Wed Mar 27 23:21:54 2002 UTC
# Line 1 / Line 1
-  /* ???
-     Copyright 2002 Johan Rydberg, jrydberg@rtmk.org.
1    /* Copyright 2002 Johan Rydberg, jrydberg@rtmk.org.
2    
3  This program is free software; you can redistribute it and/or modify
4  it under the terms of the GNU General Public License as published by
# Line 17 / Line 16  Foundation, Inc., 59 Temple Place - Suit
16    
17  #include <stdio.h>
18  #include <stdlib.h>
19    #include <pthread.h>
20    #include <assert.h>
21    #include <string.h>
22    #include <unistd.h>
23  #include <errno.h>
-  #include "module.h"
24    
25  #include "io-buf.h"
26    #include "nova-intern.h"
27    #include "io-vfs.h"
28    
-  static struct queue_entry buf_hash_buckets [64];
-  static struct queue_entry buf_lru_list;
-  #define BUF_HASH_FN(DEV, BLKNO) \
-    (((int) DEV ^ (BLKNO) * (BLKNO) ^ ((DEV) * BLKNO * 232528)) % 64)
29    #define round_bsize(A)  ((((int) A) + 511) & ~511)
30    
31    /* Number of bytes currently allocated by buffers.
32       If this grows above limit we must release buffers.  */
33    size_t io_buf_outstanding_data_count = 0;
34    
35    /* We have three different linked lists that a buffer can
36       live on; free (empty), least recently used and the AGE list.  */
37    #define BQUEUE_FREE     0
38    #define BQUEUE_AGE      1
39    #define BQUEUE_LRU      2
40    
41    /* Linked list headers for the lists described above.  */
42    struct queue_entry io_buf_queues [3];
43    
44    /* Lock for the queues above.  */
45    pthread_mutex_t io_buf_queues_lock = PTHREAD_MUTEX_INITIALIZER;
46    #define io_buf_lock_queues()    pthread_mutex_lock (&io_buf_queues_lock)
47    #define io_buf_unlock_queues()  pthread_mutex_unlock (&io_buf_queues_lock)
48    
49    /* Cache buckets.  Currently 256 of them.  */
50    struct queue_entry io_buf_cache_queues [256];
51    
52    /* Hash function for VP plus BLKNO.  */
53    #define BHASH(VP, BLK)  (((((unsigned int) VP) << 4) + (BLK)) % 256)
54    
55    /* Sometimes we want a free buffer.  That is signalled on this
56       condition variable.  The mutex is the queue lock.  */
57    pthread_cond_t io_buf_buffers_wanted = PTHREAD_COND_INITIALIZER;
58    
59    #if 0
60    #define VLOCK(VP) pthread_mutex_lock (&(VP)->v_lock)
61    #define VUNLOCK(VP) pthread_mutex_unlock (&(VP)->v_lock)
62    #endif
63    
64    /* Cache hit / miss counters (statistics).  */
65    int io_buf_cache_hits;
66    int io_buf_cache_misses;
67    
68    /* Associate buffer BP with vnode VP.  The buffer
69       will hold a reference to the vnode.  */
70  void
-  init_io_bufs (void)
71    io_buf_associate (struct vnode *vp, struct io_buf *bp)
72  {
-    int i;
73      vhold (vp);
74      bp->vp = vp;
75    
76      if (vp->v_type == VBLK || vp->v_type == VCHR)
77        bp->dev = vp->v_rdev;
78      else
79        bp->dev = NODEV;
80    
81      /* Insert onto list for new vnode.  */
82      pthread_mutex_lock (&vp->v_lock);
83      queue_enter (&vp->v_cleanbufs, bp, struct io_buf *, nodeq);
84      pthread_mutex_unlock (&vp->v_lock);
85    
86      bp->flags |= B_VNCLEAN;
87    }
88    
89    /* Disassociate buffer BP from its vnode.
90       This removes the buffer from the {clean|dirty} queue of that vnode.  */
91    void
92    io_buf_disassociate (struct io_buf *bp)
93    {
94      struct vnode *vp = bp->vp;
95    
96      /* Delete from old vnode list, if on one.  */
97      pthread_mutex_lock (&vp->v_lock);
98      if (bp->flags & B_VNCLEAN)
99        {
100          queue_remove (&vp->v_cleanbufs, bp, struct io_buf *, nodeq);
101        }
102      else if (bp->flags & B_VNDIRTY)
103        {
104          queue_remove (&vp->v_dirtybufs, bp, struct io_buf *, nodeq);
105        }
106      bp->flags &= ~(B_VNCLEAN | B_VNDIRTY);
107      pthread_mutex_unlock (&vp->v_lock);
108    
109      bp->vp = NULL;
110      vrelease (vp);
111    }
112    
113    /* Reassign buffer BP from one vnode to new vnode NEWVP.
114       Used to assign file specific control information
115       (indirect blocks) to the vnode to which they belong.  */
116    void
117    io_buf_reassign (struct io_buf *bp, struct vnode *newvp)
118    {
119      struct vnode *vp = bp->vp;
120      struct queue_entry *qp;
121      int newflags;
122    
123      /* Delete from old vnode list, if on one.  */
124      if (bp->flags & B_VNCLEAN)
125        {
126          queue_remove (&vp->v_cleanbufs, bp, struct io_buf *, nodeq);
127        }
128      else if (bp->flags & B_VNDIRTY)
129        {
130          queue_remove (&vp->v_dirtybufs, bp, struct io_buf *, nodeq);
131        }
132      bp->flags &= ~(B_VNCLEAN | B_VNDIRTY);
133    
134      /* If dirty, put on list of dirty buffers;
135         otherwise insert onto list of clean buffers.  */
136      if ((bp->flags & B_DELWRITE) == 0)
137        {
138          qp = &newvp->v_cleanbufs;
139          newflags = B_VNCLEAN;
140        }
141      else
142        {
143          qp = &newvp->v_dirtybufs;
144          newflags = B_VNDIRTY;
145        }
146    
147      queue_enter (qp, bp, struct io_buf *, nodeq);
148      bp->flags |= newflags;
149      bp->vp = newvp;
150    }
151    
152    /* Return pointer to buffer BLKNO associated with vnode VP.
153       Return NULL if the buffer is not in the cache.  */
154    static struct io_buf *
155    incore (void *vp, off_t blkno)
156    {
157      struct io_buf *bp, *f = 0;
158      struct queue_entry *qp;
159    
160      io_buf_lock_queues ();
161      qp = &io_buf_cache_queues [BHASH (vp, blkno)];
162      queue_iterate (qp, bp, struct io_buf *, hashq)
163        {
164          if (bp->vp == vp && bp->blkno == blkno
165              && ! (bp->flags & B_INVAL))
166            {
167              io_buf_cache_hits++;
168              f = bp; break;
169            }
170        }
171      io_buf_unlock_queues ();
172      if (! f)
173        io_buf_cache_misses++;
174      return f;
175    }
176    
177    /* Add buffer BP to the cache (hash lookup table).  */
178    void
179    addcache (struct io_buf *bp)
180    {
181      struct queue_entry *qp;
182    
183      io_buf_lock_queues ();
184      qp = &io_buf_cache_queues [BHASH (bp->vp, bp->blkno)];
185      queue_enter (qp, bp, struct io_buf *, hashq);
186      bp->flags |= B_CACHEQ;
187      io_buf_unlock_queues ();
188    }
189    
190    /* Remove buffer BP from cache (hash lookup table).  */
191    void
192    remcache (struct io_buf *bp)
193    {
194      struct queue_entry *qp;
195    
196      io_buf_lock_queues ();
197      qp = &io_buf_cache_queues [BHASH (bp->vp, bp->blkno)];
198      queue_remove (qp, bp, struct io_buf *, hashq);
199      bp->flags &= ~B_CACHEQ;
200      io_buf_unlock_queues ();
201    }
202    
-    queue_init (&buf_lru_list);
-    for (i = 0; i < 64; i++)
-      queue_init (& buf_hash_buckets [i]);
203    /* Remove buffer BP from free list.  BP should be locked.  */
204    static void
205    remfree (struct io_buf *bp)
206    {
207      if (bp->flags & B_AGE)
208        {
209          queue_remove (&io_buf_queues [BQUEUE_AGE], bp,
210                        struct io_buf *, listq);
211          bp->flags &= ~B_AGE;
212        }
213      else if (bp->flags & B_LRU)
214        {
215          queue_remove (&io_buf_queues [BQUEUE_LRU], bp,
216                        struct io_buf *, listq);
217          bp->flags &= ~B_LRU;
218        }
219      else if (bp->flags & B_FREE)
220        {
221          queue_remove (&io_buf_queues [BQUEUE_FREE], bp,
222                        struct io_buf *, listq);
223          bp->flags &= ~B_FREE;
224        }
225  }
226    
-  static inline struct io_buf *
-  search_hash_list (dev_t dev, unsigned int blkno)
227    /* When a block is wanted and it was not found in the cache,
228       a buffer is chosen from the AGE list, or the LRU list if
229       the AGE list was empty.  This blocks if BLOCK_P != 0.  */
230    /* Note that the buffer returned is busy.  */
231    static struct io_buf *
232    choose_buffer (int block_p)
233  {
-    int hash_index = BUF_HASH_FN (dev, blkno);
-    struct io_buf *buf;
234      struct queue_entry *qp;
235      struct io_buf *bp;
236      int i;
237    
-    for (buf = (struct io_buf *) queue_first (&buf_hash_buckets [hash_index]);
-         ! queue_end (&buf_hash_buckets [hash_index], &buf->b_hashq);
-         buf = (struct io_buf *) queue_next (&buf->b_hashq))
238      io_buf_lock_queues ();
239    
240      do
241      {
-        if (buf->b_dev == dev && buf->b_blkno)
242        /* We loop through all the lists to try to find a free
243           buffer.  Order: age, lru and free.  */
244        for (i = 0; i < 3; i++)
245          {
-            remque (&buf->b_hashq);
-            return buf;
246            qp = & io_buf_queues [i];
247            if (! queue_empty (qp))
248                {
249                  queue_remove_first (qp, bp, struct io_buf *, listq);
250                  pthread_mutex_lock (& bp->mutex);
251                  bp->flags |= B_BUSY;
252                  bp->flags &= ~(1 << i);
253                  pthread_mutex_unlock (& bp->mutex);
254                  io_buf_unlock_queues ();
255                  return bp;
256                }
257          }
258    
259          /* We did not find any buffer header.  If we should
260             block we do that here.  Unlocks the queue.  */
261          if (block_p)
262            pthread_cond_wait (& io_buf_buffers_wanted,
263                               & io_buf_queues_lock);
264      }
265      while (block_p);
266    
267      io_buf_unlock_queues ();
268    return 0;
269  }
270    
-  /* Grab a new buffer structure.  SIZE is number of data bytes for
-     buffer.  If we fail to allocate, return NULL.  */
-  struct io_buf *
-  io_buf_get (size_t size)
271    /* Find a buffer which is available for use.  Select something
272       from a free list.  Preference is to AGE list, then LRU list.  */
273    static struct io_buf *
274    getnewbuf (void)
275    {
276      struct io_buf *bp = choose_buffer (1);
277      
278      /* Disassociate us from our vnode, if we had one. */
279      if (bp->vp)
280        {
281          io_buf_disassociate (bp);
282        }
283    
284      if (bp->flags & B_CACHEQ)
285        remcache (bp);
286    
287      /* clear out various other fields */
288      bp->flags = B_BUSY | (bp->flags & B_DATA ? B_DATA : 0);
289      bp->dev = NODEV;
290      bp->blkno = bp->lblkno = bp->rblkno = 0;
291      bp->iodone = 0;
292      bp->error = 0;
293      bp->resid = 0;
294      bp->bcount = 0;
295          
296      return bp;
297    }
298    
299    /* Expand or contract the actual memory allocated to BP.
300       BP must be locked.
301    
302       If the buffer shrinks, data is lost, so it's up to the
303       caller to have written it out *first*; this routine will not
304     start a write.  If the buffer grows, it's the caller's
305       responsibility to fill out the buffer's additional contents.  */
306    static void
307    allocbuf (struct io_buf *bp, size_t size)
308  {  {
309    struct io_buf *buf;    size_t desired_size;
310      int err;
311    
312      desired_size = round_bsize (size);
313      if (desired_size > MAXBUFSIZE)
314        panic ("allocbuf: buffer larger than 16 pages requested");
315    
316      if (bp->size == desired_size)
317        goto out;
318    
319      /* Check whether the buffer has any data allocated at all.  */
320      if (! (bp->flags & B_DATA))
321        {
322          err = vm_allocate (task_self (), (vm_offset_t *) &bp->data,
323                             vm_round_page (desired_size), 1);
324          if (err)
325            panic ("no more memory for buffer");
326        bp->size = desired_size;
327          bp->flags |= (B_DATA | B_INVAL);
328    
329          io_buf_outstanding_data_count += desired_size;
330        }
331    
332    /* If the buffer is smaller than the desired size, we
333       allocate new data, copy the old contents over, and release the old data.  */
334      if (bp->size < desired_size)
335        {
336          void *new_data, *old_data = bp->data;
337          size_t old_size = bp->size;
338    
339          err = vm_allocate (task_self (), (vm_offset_t *) &new_data,
340                             vm_round_page (desired_size), 1);
341        io_buf_outstanding_data_count += (desired_size - bp->size);
342    
343          bp->flags |= B_INVAL;
344          memcpy (new_data, old_data, bp->size);
345          bp->data = new_data;
346          bp->size = desired_size;
347    
348          vm_deallocate (task_self (),
349                         (vm_offset_t) old_data, vm_round_page (old_size));
350        }
351    
352      /* If we want a buffer that is less than current size,
353       we just deallocate the unused range.  ??? */
354      if (bp->size > desired_size)
355        {
356          trace_printf ("decreasing");
357    
358          if (vm_round_page (bp->size) != vm_round_page (desired_size))
359            vm_deallocate (task_self (),
360                           (vm_offset_t) bp->data + desired_size,
361                           vm_round_page ((bp->size - desired_size)));
362    
363        io_buf_outstanding_data_count -= (bp->size - desired_size);
364          bp->size = desired_size;
365        }
366    
-    buf = (struct io_buf *) malloc (sizeof (struct io_buf));
-    if (! buf)
-      return NULL;
367     out:
368      bp->bcount = size;
369  }
370    
-    buf->b_data    = malloc (size);
-    buf->b_bufsize = size;
-    buf->b_dev     = -1;
-    return buf;
371    /* Wrapper for the function above.  We just lock the buffer
372       and call it.  */
373    void
374    io_buf_adjust_size (struct io_buf *bp, size_t size)
375    {
376      pthread_mutex_lock (&bp->mutex);
377      allocbuf (bp, size);
378      pthread_mutex_unlock (&bp->mutex);
379  }
380    
-  /* Create new buffer that holds DATA.  SIZE is number of bytes
-     that can be put into DATA.  Returns buf structure.  */
381    
382    /* Get a block of requested SIZE that is associated with
383       given vnode VP and block offset BLKNO.  If it is found
384       in the block cache, mark it as having been found,
385       make it busy and return it.  Otherwise, return an empty
386       block of the correct size.
387    
388     It is up to the caller to ensure that the cached blocks
389       are of the correct size.  */
390  struct io_buf *
-  io_buf_init (void *data, size_t size)
391    io_buf_getblk (struct vnode *vp, off_t blkno, size_t size)
392    {
393      struct io_buf *bp;
394    
395     retry:
396      bp = incore (vp, blkno);
397      if (bp)
398        {
399          pthread_mutex_lock (&bp->mutex);
400          if (bp->flags & B_BUSY)
401            {
402              while (bp->flags & B_BUSY)
403                {
404                  bp->flags |= B_WANTED;
405                  pthread_cond_wait (&bp->cond, &bp->mutex);
406                }
407              pthread_mutex_unlock (&bp->mutex);
408              goto retry;
409            }
410    
411          bp->flags |= B_BUSY;
412          remfree (bp);
413        }
414      else
415        {
416          if ((bp = getnewbuf ()) == NULL)
417            goto retry;
418    
419          pthread_mutex_lock (&bp->mutex);
420          io_buf_associate (vp, bp);
421          bp->blkno = bp->lblkno = bp->rblkno = blkno;
422    
423          addcache (bp);
424        }
425    
426      /* When we come here, we have a locked buffer in BP.  */
427      allocbuf (bp, size);
428      pthread_mutex_unlock (&bp->mutex);
429      return bp;
430    }
431    
432    /* Internal function for releasing BP.  
433       Buffer must be locked.  */
434    static void
435    release_buffer (struct io_buf *bp)
436  {
-    struct io_buf *buf;
437      assert (bp->flags & B_BUSY);
438    
439      /* Determine which queue the buffer should be on,
440         then put it there.  */
441    
442      /* If it's not cacheable, or an error, mark it invalid. */
443      if (bp->flags & (B_NOCACHE | B_ERROR))
444        bp->flags |= B_INVAL;
445    
446      /* Unlock the buffer. */
447      bp->flags &= ~(B_ASYNC|B_BUSY|B_NOCACHE);
448      bp->flags |= B_CACHE;
449    
450      if (bp->size <=0 || bp->flags & B_INVAL || !(bp->flags & B_DATA))
451        {
452          struct queue_entry *qp;
453    
454          /* If it's invalid or empty, dissociate it from its vnode
455             and put on the head of the appropriate queue.  */
456          bp->flags &= ~(B_DONE | B_DELWRITE);
457    
458          if (bp->vp)
459            {
460              io_buf_reassign (bp, bp->vp);
461              io_buf_disassociate (bp);
462            }
463    
464          if (bp->size <= 0 || ! (bp->flags & B_DATA))
465            {
466              qp = &io_buf_queues [BQUEUE_FREE];
467              bp->flags |= B_FREE;
468            }
469          else /* inval data */
470            {
471              qp = &io_buf_queues [BQUEUE_AGE];
472              bp->flags |= B_AGE;
473            }
474    
475          io_buf_lock_queues ();
476          queue_enter_first (qp, bp, struct io_buf *, listq);
477          io_buf_unlock_queues ();
478        }
479      else
480        {
481          io_buf_lock_queues ();
482          queue_enter (&io_buf_queues [BQUEUE_LRU], bp,
483                       struct io_buf *, listq);
484          bp->flags |= B_LRU;
485          io_buf_unlock_queues ();
486        }
487    
-    buf = (struct io_buf *) malloc (sizeof (struct io_buf));
-    if (! buf)
-      return NULL;
488      /* Wake up any processes waiting for any
489         buffer to become free. */
490      pthread_cond_broadcast (&io_buf_buffers_wanted);
491    
-    buf->b_data    = data;
-    buf->b_bufsize = size;
-    buf->b_dev     = -1;
-    return buf;
492      /* Wake up any processes waiting for _this_
493         buffer to become free. */
494      if (bp->flags & B_WANTED)
495        {
496          bp->flags &= ~B_WANTED;
497          pthread_cond_broadcast (&bp->cond);
498        }
499  }
500    
-  /* ??? */
501    /* Release buffer BP on to the free lists.  */
502    void
503    io_buf_release (struct io_buf *bp)
504    {
505      pthread_mutex_lock (&bp->mutex);
506      release_buffer (bp);
507      pthread_mutex_unlock (&bp->mutex);
508    }
509    
510    /* Wait for operations on buffer BP to complete.
511       When they do, extract and return the I/O's error value.  */
512  int
-  io_bread (struct vnode *vp, unsigned int blkno, size_t size,
-            struct io_buf **bpp)
513    io_buf_wait (struct io_buf *bp)
514  {
-    struct io_buf *bp;
-    int err;
515      int err = 0;
516    
-    bp = io_buf_get (size);
-    if (! bp)
-      return ENOMEM;
-  
-    bp->b_dev    = vp->v_rdev;
-    bp->b_bcount = size;
-    bp->b_blkno  = blkno;
-    bp->b_flags  = B_READ;
517      pthread_mutex_lock (&bp->mutex);
518      while (! (bp->flags & B_DONE))
519        pthread_cond_wait (&bp->cond, &bp->mutex);
520    
-    err = VOP_STRATEGY (vp, bp);
-    *bpp = bp;
521      /* Check for interruption of I/O (e.g. via NFS), then errors. */
522      if (bp->flags & B_EINTR)
523        {
524          bp->flags &= ~B_EINTR;
525          err = EINTR;
526        }
527      else if (bp->flags & B_ERROR)
528        err = bp->error ? bp->error : EIO;
529      pthread_mutex_unlock (&bp->mutex);
530    return err;
531  }
532    
-  /* Release reference to I/O buffer BP.  If Reference counter
-     drops to zero - free resources held by buffer.  */
533    /* Mark I/O complete on a buffer.  If a callback has been
534       requested, e.g. the pageout daemon, do so.  Otherwise,
535       awaken waiting processes.  */
536  void
-  io_brelease (struct io_buf *bp)
537    io_buf_done (struct io_buf *bp)
538  {
539      pthread_mutex_lock (&bp->mutex);
540  #if 0
-    int hash_index = BUF_HASH_FN (bp->b_dev, bp->b_blkno);
-    printf ("hash index %d for dev %d blkno %d\n", hash_index,
-            bp->b_dev, bp->b_blkno);
-    fflush (stdout);
-  
-    enqueue_tail (&buf_hash_buckets [hash_index], &bp->b_hashq);
-  #else
-    free (bp->b_data);
-    free (bp);
541      if (bp->flags & B_DONE)
542        panic ("io_buf_done: already done");
543  #endif
544      bp->flags |= B_DONE;
545      
546      /* Wakeup readers.  */
547    #if 0
548      if (! (bp->flags & B_READ))
549        pthread_cond_signal (&bp->cond);
550    #endif  
551    
552      /* If necessary, call the callout.  
553         Mark callout done.  */
554      if (bp->iodone)
555        {
556          void (*callback) (struct io_buf *) = bp->iodone;
557          bp->iodone = 0;
558          (*callback) (bp);
559        }
560      else
561        {
562          /* If async, release.  */
563          if (bp->flags & B_ASYNC)
564            release_buffer (bp);
565          else /* Wakeup the buffer.  */
566            {
567              bp->flags &= ~B_WANTED;
568              pthread_cond_broadcast (&bp->cond);
569            }
570        }
571      pthread_mutex_unlock (&bp->mutex);
572  }
573    
574    /* Mark buffer dirty, but do not release it.  */
575    void
576    io_buf_mark_dirty (struct io_buf *bp)
577    {
578      pthread_mutex_lock (&bp->mutex);
579      
580      if (! (bp->flags & B_DELWRITE))
581        {
582          bp->flags |= B_DELWRITE;
583          io_buf_reassign (bp, bp->vp);
584        }
      pthread_mutex_unlock (&bp->mutex);
585    }
586    
587    /* Do the actual reading.  */
588    static struct io_buf *
589    common_read (struct vnode *vp, off_t blkno, size_t size,
590                 struct ucred *ucred, int async_p)
591    {
592      struct io_buf *bp;
593      
594      bp = io_buf_getblk (vp, blkno, size);
595    
596      /* If the buffer does not have valid data, start a read.
597         Note that if a buffer is B_INVAL, getblk() won't
598         return it.  Therefore, it's valid if its I/O has
599         completed or been delayed.  */
600      if (! (bp->flags & (B_DONE | B_DELWRITE)))
601        {
602          bp->flags |= B_READ | (async_p ? B_ASYNC : 0);
603    
604          /* Start I/O for the buffer. */
605          VOP_STRATEGY (bp); /* ??? ucred */
606        }
607      else if (async_p)
608        io_buf_release (bp);
609    
610      return bp;
611    }
612    
613    /* Read disk block BLKNO (SIZE bytes) from VP.  
614       Return buffer in *BPP.  */
615    int
616    io_buf_read (struct vnode *vp, off_t blkno, size_t size,
617                 struct ucred *ucred, struct io_buf **bpp)
618    {
619      struct io_buf *bp;
620    
621      /* Get buffer for block. */
622      bp = *bpp = common_read (vp, blkno, size, ucred, 0);
623    
624      /* Delayed write buffers are found in the cache and have
625         valid contents.  Also, B_ERROR is not set, otherwise
626         getblk() would not have returned them.  */
627      if (bp->flags & (B_DONE|B_DELWRITE))
628        return 0;
629    
630      /* Otherwise, we had to start a read for it;
631         wait until it's valid and return the result.  */
632      return io_buf_wait (bp);
633    }
634    
635    /* Read-ahead multiple disk blocks.
636       The first, BLKNO, is sync, the rest async.  Block offsets
637       and buffer sizes listed in RABLKS and RASIZES.  Number of
638       read-ahead buffers in NRABLKS.  */
639    int
640    io_buf_read_ahead (struct vnode *vp, off_t blkno, size_t size,
641                       off_t rablks[], size_t rasizes[], int nrablks,
642                       struct ucred *ucred, struct io_buf **bpp)
643    {
644      struct io_buf *bp;
645      int i;
646    
647      bp = *bpp = common_read (vp, blkno, size, ucred, 0);
648    
649      /* For each of the read-ahead blocks;
650         start a read, if necessary.  */
651      for (i = 0; i < nrablks; i++)
652        {
653          /* If it's in the cache, just go on to next one. */
654          if (incore (vp, rablks [i]))
655            continue;
656    
657          /* Get a buffer for the read-ahead block */
658          (void) common_read (vp, rablks[i], rasizes[i], ucred, 1);
659        }
660    
661      /* Delayed write buffers are found in the cache and have
662         valid contents. Also, B_ERROR is not set, otherwise
663         getblk() would not have returned them.  */
664      if (bp->flags & (B_DONE|B_DELWRITE))
665        return 0;
666    
667      /* Otherwise, we had to start a read for it;
668         wait until it's valid and return the result.  */
669      return io_buf_wait (bp);
670    }
671    
672    
673    /* Initialize the I/O buffer system.  */
674    void
675    io_buf_init (void)
676    {
677      struct io_buf *bp;
678      int i;
679    
680      for (i = 0; i < 3; i++)
681        queue_init (& io_buf_queues [i]);
682      for (i = 0; i < 256; i++)
683        queue_init (& io_buf_cache_queues [i]);
684    
685      for (i = 0; i < NBUFFERS; i++)
686        {
687          bp = (struct io_buf *) malloc (sizeof (struct io_buf));
688          assert (bp);
689          memset (bp, 0, sizeof (struct io_buf));
690    
691          queue_enter (& io_buf_queues [BQUEUE_FREE], bp,
692                       struct io_buf *, listq);
693        }
694  }

Legend: lines prefixed with '-' were removed from v.1.1.1.1; numbered lines were added in (or are unchanged from) v.1.2.
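For readers new to this interface, the sketch below shows how a filesystem read path might use the routines added in this revision (io_buf_read and io_buf_release, declared in io-buf.h). It is an illustration only, not part of the nova sources: the function name read_superblock_example, the choice of block 0, and the idea of copying a superblock out of the buffer are assumptions made for the example.

/* Illustrative sketch -- not part of revision 1.2.  Assumes VP is a
   vnode obtained from the VFS layer and CRED an appropriate ucred.  */
#include <string.h>
#include "io-buf.h"

static int
read_superblock_example (struct vnode *vp, struct ucred *cred,
                         void *sb, size_t sb_size)
{
  struct io_buf *bp;
  int err;

  /* Look block 0 up in the cache; a miss starts real I/O through
     VOP_STRATEGY and waits for it to complete.  */
  err = io_buf_read (vp, (off_t) 0, sb_size, cred, &bp);
  if (err)
    {
      io_buf_release (bp);
      return err;
    }

  /* On success the buffer data is valid for bp->bcount bytes.  */
  memcpy (sb, bp->data, sb_size);

  /* Put the buffer back on the LRU list so it can be found again
     by a later io_buf_getblk, or recycled under memory pressure.  */
  io_buf_release (bp);
  return 0;
}

A write path would presumably fill bp->data, call io_buf_mark_dirty (bp) to set B_DELWRITE, and then release the buffer, leaving the actual write to a later flush.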
