/* Copyright 2002 Johan Rydberg, jrydberg@rtmk.org.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.  */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "io-vfs.h"
#include "io-vnode.h"
#include "fs-spec.h"

enum vtype iftovt_tab[16] =
{
  VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
  VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};

const int vttoif_tab[9] =
{
  0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR,
  /* S_IFLNK */ 0, /* S_IFSOCK */ 0, S_IFIFO, S_IFMT,
};

/* Notes about locking:

   A vnode is always locked before the free list lock is taken.
   When selecting a node from the free list, we must use a trylock;
   if the node is already locked, we move on to the next node on
   the list.  */

pthread_mutex_t vnode_list_lock = PTHREAD_MUTEX_INITIALIZER;
struct queue_entry vnode_free_list = queue_ctor (vnode_free_list);

/* We maintain a cache for fast lookup of vnodes.  We hash on the
   VFS pointer and the inode number.  The buckets are protected by
   the free list lock.  */
struct queue_entry vnode_cache_buckets [256];

void
init_vnodes (void)
{
  int i;

  for (i = 0; i < 256; i++)
    queue_init (&vnode_cache_buckets [i]);
}

/* Maximum number of vnodes the system can use.  Until we have
   reached this limit we allocate new vnodes.  */
size_t max_vnodes = 1024;
static size_t nvnodes;

/* Grab a vnode.  If we have not yet allocated the maximum number of
   vnodes we allocate a new one; otherwise we take one from the free
   (LRU) list.  The returned vnode is locked.  */
static struct vnode *
grab_vnode (void)
{
  struct vnode *vp, *locked_vp = 0;

  if (nvnodes == max_vnodes)
    {
    retry_scan:
      pthread_mutex_lock (&vnode_list_lock);
      queue_iterate (&vnode_free_list, vp, struct vnode *, v_listq)
        {
          /* Skip nodes that are locked by someone else.  */
          if (pthread_mutex_trylock (&vp->v_lock))
            continue;
          locked_vp = vp;
          break;
        }
      if (locked_vp)
        queue_remove (&vnode_free_list, vp, struct vnode *, v_listq);
      pthread_mutex_unlock (&vnode_list_lock);

      /* Spin until a free vnode shows up.  */
      if (! locked_vp)
        goto retry_scan;

      VOP_RECLAIM (locked_vp);
      return locked_vp;
    }

  nvnodes++;
  vp = (struct vnode *) malloc (sizeof (struct vnode));
  assert (vp != 0);
  memset (vp, 0, sizeof (struct vnode));
  queue_init (&vp->v_cleanbufs);
  queue_init (&vp->v_dirtybufs);
  pthread_mutex_init (&vp->v_lock, NULL);
  pthread_cond_init (&vp->v_cond, NULL);
  return vp;
}

/* Fetch a new vnode to be used by file system VFS.  We set the
   operation vector to VOPS.  The vnode is returned in *VPP.  */
int
getnewvnode (struct vfs *vfs, struct vnode_ops *vops, struct vnode **vpp)
{
  struct vnode *vp = grab_vnode ();

  vp->v_type = VNON;
  vp->v_usecount = 1;
  vp->v_data = 0;
  vp->v_vfsp = vfs;
  vp->v_ops = vops;

  queue_enter (&vfs->vfs_vnl, vp, struct vnode *, v_vfsq);
  pthread_mutex_unlock (&vp->v_lock);

  *vpp = vp;
  return 0;
}

/* Release a reference to vnode VP.  If the reference counter drops
   to zero, we enter the vnode on the free list.  */
void
vrelease (struct vnode *vp)
{
  pthread_mutex_lock (&vp->v_lock);

  /* Check for underflow right after the decrement; an unsigned
     counter that wrapped to (unsigned short) -1 would otherwise
     pass the "still referenced" test below.  */
  vp->v_usecount--;
  assert (vp->v_usecount != (unsigned short) -1);

  if (vp->v_usecount > 0)
    {
      pthread_mutex_unlock (&vp->v_lock);
      return;
    }

  /* Insert at tail of the LRU list.  */
  pthread_mutex_lock (&vnode_list_lock);
  queue_enter (&vnode_free_list, vp, struct vnode *, v_listq);
  pthread_mutex_unlock (&vnode_list_lock);
  pthread_mutex_unlock (&vp->v_lock);

  VOP_INACTIVE (vp);
}
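/* The cache buckets above are keyed on the VFS pointer and the inode
   number, but this file does not define the hash function itself.
   What follows is a minimal sketch of one possible bucket selector;
   the name vnode_cache_bucket and the ino_t parameter are
   illustrative assumptions, not part of the original interface.  */
#if 0
static inline struct queue_entry *
vnode_cache_bucket (struct vfs *vfs, ino_t ino)
{
  /* XOR the VFS pointer with the inode number and fold the result
     into the 256-entry bucket table.  */
  unsigned long hash = (unsigned long) vfs ^ (unsigned long) ino;
  return &vnode_cache_buckets [hash & 255];
}
#endif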
/* Obtain a reference to vnode VP.  The vnode will be removed from
   the free list if it is on it.  */
void
vhold (struct vnode *vp)
{
  pthread_mutex_lock (&vp->v_lock);
  if (vp->v_usecount == 0)
    {
      pthread_mutex_lock (&vnode_list_lock);
      queue_remove (&vnode_free_list, vp, struct vnode *, v_listq);
      pthread_mutex_unlock (&vnode_list_lock);
    }
  vp->v_usecount++;
  pthread_mutex_unlock (&vp->v_lock);
}

/* Allocate a vnode that is attached to device DEV.  The vnode is
   returned in *VPP.  The vnode is created on the special file
   system (specfs); the VFS argument is currently unused.  */
int
vdevvp (struct vfs *vfs, dev_t dev, enum vtype type, struct vnode **vpp)
{
  int err = getnewvnode (&specfs_mount, &specfs_vnops, vpp);
  if (! err)
    {
      (*vpp)->v_type = type;
      (*vpp)->v_rdev = dev;
    }
  return err;
}

/* Do the usual access checking.  FILE_MODE, UID and GID are from the
   vnode in question.  ACC_MODE and CRED are from the VOP_ACCESS
   parameter list.  */
int
vaccess (enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
         mode_t acc_mode, struct ucred *cred)
{
  mode_t mask = 0;

  /* The super-user always gets read/write access, but execute access
     requires at least one execute bit to be set.  */
  if (cred->cr_uid == 0)
    {
      if ((acc_mode & VEXEC) && type != VDIR
          && (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
        return EACCES;
      return 0;
    }

  /* Otherwise, check the owner.  */
  if (cred->cr_uid == uid)
    {
      if (acc_mode & VEXEC)
        mask |= S_IXUSR;
      if (acc_mode & VREAD)
        mask |= S_IRUSR;
      if (acc_mode & VWRITE)
        mask |= S_IWUSR;
      return ((file_mode & mask) == mask ? 0 : EACCES);
    }

#if 0
  /* Otherwise, check the groups.  */
  if (cred->cr_gid == gid || groupmember (gid, cred))
    {
      if (acc_mode & VEXEC)
        mask |= S_IXGRP;
      if (acc_mode & VREAD)
        mask |= S_IRGRP;
      if (acc_mode & VWRITE)
        mask |= S_IWGRP;
      return ((file_mode & mask) == mask ? 0 : EACCES);
    }

  /* Otherwise, check everyone else.  */
  if (acc_mode & VEXEC)
    mask |= S_IXOTH;
  if (acc_mode & VREAD)
    mask |= S_IROTH;
  if (acc_mode & VWRITE)
    mask |= S_IWOTH;
  return ((file_mode & mask) == mask ? 0 : EACCES);
#else
  /* XXX group and other checks are not yet enabled; everyone who
     is not the owner or the super-user is granted access.  */
  return 0;
#endif
}
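/* A sketch of the intended vnode life cycle, for illustration only:
   a file system creates a vnode with getnewvnode, takes temporary
   extra references with vhold, and balances every reference with a
   vrelease so the node eventually migrates to the LRU free list.
   The myfs_* names below are hypothetical.  */
#if 0
static void
myfs_example (struct vfs *vfs)
{
  struct vnode *vp;

  if (getnewvnode (vfs, &myfs_vnops, &vp) == 0)
    {
      vp->v_type = VREG;        /* returned unlocked, v_usecount == 1 */
      vhold (vp);               /* temporary extra reference */
      /* ... use the vnode ... */
      vrelease (vp);            /* drop the extra reference */
      vrelease (vp);            /* final release; node goes on LRU list */
    }
}
#endif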