/* Pager!
   Copyright 2002 Johan Rydberg, jrydberg@rtmk.org.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.  */

#include <sys/param.h>   /* MIN */
#include <stdlib.h>      /* malloc, free */
#include <assert.h>
#include <pthread.h>

#include "pager.h"
#include "io-vfs.h"
#include "io-vnode.h"
#include "io-buf.h"
#include "ihash.h"
#include "nova-intern.h"

/* Hash table mapping pager ports to pager structures.  */
ihash_t _pager_lookup_table;

/* Create a new pager that uses VN as backing storage.  PROT is the
   protection allowed.  If PRECIOUS_P is true, data will be flushed
   back to VN.  */
struct pager *
pager_create (struct vnode *vn, vm_prot_t prot, int precious_p)
{
  struct pager *pager;
  int err;

  if (! _pager_lookup_table)
    ihash_create (&_pager_lookup_table);

  pager = (struct pager *) malloc (sizeof (struct pager));
  if (! pager)
    return 0;

  err = port_allocate (task_self (), &pager->pager_port);
  if (err)
    {
      free (pager);
      return 0;
    }

  pager->vnode = vn;
  pager->precious_p = precious_p;
  pager->lock_prot = prot;

  trace_printf ("just created pager with port %d", pager->pager_port);

  ihash_add (_pager_lookup_table, (int) pager->pager_port,
             (void *) pager, 0);
  port_move_member (task_self (), pager->pager_port, ux_pset);
  return pager;
}

/* Look up the pager that is associated with PAGER_PORT.  */
struct pager *
pager_lookup (rtmk_port_t pager_port)
{
  return (struct pager *) ihash_find (_pager_lookup_table,
                                      (int) pager_port);
}

/* Return the receive right for PAGER.  This is the right on which we
   receive paging requests.  */
rtmk_port_t
pager_get_right (struct pager *pager)
{
  return pager->pager_port;
}

/* Initialize the mapping of PAGER.  PAGER_REQUEST is the request port
   for the mapping.  */
void
pager_memory_object_init (rtmk_port_t pager_port, rtmk_port_t pager_request)
{
  struct pager *pager = pager_lookup (pager_port);

  if (! pager)
    {
      trace_printf ("??? no pager for port %d, req is %d",
                    pager_port, pager_request);
      return;
    }
  else
    trace_printf ("PAGER INIT %d", pager_request);

  /* We make the object ready right away.  No need to wait.  */
  memory_object_set_attributes (pager_request, 1, 0,
                                MEMORY_OBJECT_COPY_DELAY);
}

/* Data is requested for PAGER.  OFFSET is the offset into the backing
   storage object.  SIZE is the number of bytes the kernel wants (we
   may supply more data if we want).  ACCESS is the access that is
   being requested.  */
void
pager_memory_object_data_request (rtmk_port_t pager_port,
                                  rtmk_port_t pager_request,
                                  vm_offset_t offset, vm_size_t size,
                                  vm_prot_t access)
{
  struct pager *pager = pager_lookup (pager_port);
  struct io_buf *bp;
  int err, nrun;
  off_t blkno;

  if (! pager)
    {
      trace_printf ("??? no pager for port %d", pager_port);
      return;
    }

  assert (size == VM_PAGE_SIZE);
  assert ((offset & VM_PAGE_MASK) == 0);

#if 0
  trace_printf ("pager %d pager request %d offset %x",
                pager_port, pager_request, offset);
#endif

  /* We look up the block number for OFFSET by calling VOP_BMAP.  After
     that we try to read the block(s) into a buffer.  */
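  /* BLKNO receives the starting device block for OFFSET, and NRUN the
     number of contiguous blocks that follow it (at least, that is how
     the two are used below); NRUN bounds how much we can hand to a
     single VOP_STRATEGY call before the next offset must be
     translated.  */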
  err = VOP_BMAP (pager->vnode, (off_t) offset, &blkno, &nrun);
  if (err)
    goto no_data;
  if (nrun == 0)
    nrun = 1;

  bp = io_buf_getblk (pager->vnode, blkno, size);

  /* If the buffer does not have valid data, start a read.  Note that
     if a buffer is B_INVAL, getblk() won't return it.  Therefore, the
     data is valid if its I/O has completed or been delayed.  */
  if ((bp->flags & (B_DONE|B_DELWRITE)) && !(bp->flags & B_INVAL))
    {
      trace_printf ("was in cache");
      memory_object_data_supply (pager_request, offset,
                                 (vm_offset_t) bp->data, VM_PAGE_SIZE,
                                 0, VM_PROT_NONE, pager->precious_p);
    }
  /* If the data was invalid we have to (re-)read it all.  We use a
     sub-buffer for this and call VOP_STRATEGY directly.  */
  else  /* We treat it as invalid.  */
    {
      size_t iosize, bsize = 1024;
      struct io_buf subbuf;
      off_t off = offset;

      pthread_mutex_init (&subbuf.mutex, 0);
      pthread_cond_init (&subbuf.cond, 0);
      subbuf.data = bp->data;

      for (; size; size -= iosize)
        {
          /* We can not read more than NRUN contiguous blocks, and we
             do not want to read more than SIZE bytes.  */
          nrun = MIN (nrun, VM_PAGE_SIZE / bsize);
          iosize = MIN (nrun * bsize, size);

          subbuf.blkno = blkno;
          subbuf.bcount = iosize;
          subbuf.size = iosize;
          subbuf.flags = B_BUSY|B_READ;
          subbuf.iodone = 0;
          subbuf.vp = pager->vnode;

          /* Do the actual reading.  */
          err = VOP_STRATEGY (&subbuf);
          if (err)
            {
              trace_printf ("error is %d", err);
              goto release_without_data;
            }

          /* Before we start all over again, check whether this read
             reached the end of the buffer.  If it did, we're done;
             just continue.  */
          if ((size - iosize) <= 0)
            continue;
          off += iosize;

          /* Look up the block number for OFF by calling VOP_BMAP,
             then read the next run of blocks into the buffer.  */
          err = VOP_BMAP (pager->vnode, off, &blkno, &nrun);
          if (err)
            goto release_without_data;
          if (nrun == 0)
            nrun = 1;

          subbuf.data += iosize;
        }

      bp->flags &= ~B_INVAL;
      if (! (bp->flags & B_DONE))
        io_buf_done (bp);

      /* All of our data is in BP->data; just supply it to the kernel.
         This is the last thing we do.  */
      err = memory_object_data_supply (pager_request, offset,
                                       (vm_offset_t) bp->data,
                                       VM_PAGE_SIZE, 0, VM_PROT_NONE,
                                       pager->precious_p);
    }

  /* We are done with everything.  Data should have been provided to
     the kernel.  We just have to release the buffer and return.  */
  io_buf_release (bp);
  return;

 release_without_data:
  io_buf_release (bp);
 no_data:
  trace_printf ("no data");
  memory_object_data_unavail (pager_request, offset, VM_PAGE_SIZE);
}

/* The kernel hands back SIZE bytes at ADDRESS for PAGER at OFFSET.
   Nothing is written to backing storage here; we only release the
   memory.  */
void
pager_memory_object_write_data (rtmk_port_t pager, rtmk_port_t pager_request,
                                vm_offset_t offset, void *address,
                                vm_size_t size, int dirty_p,
                                int kernel_copy_p)
{
  vm_deallocate (task_self (), (vm_offset_t) address, size);
}
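
/* A minimal usage sketch, not part of the original source: how a
   caller might create a pager for a vnode and later get back to it
   from the port a paging request arrived on.  Only functions defined
   above are used; VM_PROT_READ and VM_PROT_WRITE are assumed to exist
   (following the Mach naming used by VM_PROT_NONE above), and the
   precious flag value is an example.  */
#if 0
static void
pager_example (struct vnode *vn)
{
  struct pager *pager, *found;
  rtmk_port_t right;

  /* Create a pager whose pages may be mapped read/write and whose
     modified pages must be flushed back to the vnode.  */
  pager = pager_create (vn, VM_PROT_READ | VM_PROT_WRITE, 1);
  if (! pager)
    return;

  /* The receive right is what is handed to the kernel as the memory
     object when the vnode is mapped.  */
  right = pager_get_right (pager);

  /* When a paging request later arrives on RIGHT, the handlers above
     use pager_lookup to recover the pager structure.  */
  found = pager_lookup (right);
  assert (found == pager);
}
#endif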