diff -urNX ../exclude linux-2.3.99pre8-linus/fs/fcntl.c linux-2.3.99pre8+lock/fs/fcntl.c --- linux-2.3.99pre8-linus/fs/fcntl.c Sun Mar 12 12:03:14 2000 +++ linux-2.3.99pre8+lock/fs/fcntl.c Sat May 20 04:43:39 2000 @@ -13,6 +15,8 @@ #include extern int sock_fcntl (struct file *, unsigned int cmd, unsigned long arg); +extern int fcntl_setlease(unsigned int fd, long arg); +extern int fcntl_getlease(unsigned int fd); /* * locate_fd finds a free file descriptor in the open_fds fdset, @@ -181,11 +185,9 @@ filp = fget(fd); if (!filp) goto out; - err = 0; - lock_kernel(); + err = -EINVAL; switch (cmd) { case F_DUPFD: - err = -EINVAL; if (arg < NR_OPEN) { get_file(filp); err = dupfd(filp, arg); @@ -195,6 +197,7 @@ err = FD_ISSET(fd, current->files->close_on_exec); break; case F_SETFD: + err = 0; if (arg&1) FD_SET(fd, current->files->close_on_exec); else @@ -204,14 +207,14 @@ err = filp->f_flags; break; case F_SETFL: + lock_kernel(); err = setfl(fd, filp, arg); + unlock_kernel(); break; case F_GETLK: err = fcntl_getlk(fd, (struct flock *) arg); break; case F_SETLK: - err = fcntl_setlk(fd, cmd, (struct flock *) arg); - break; case F_SETLKW: err = fcntl_setlk(fd, cmd, (struct flock *) arg); break; @@ -226,11 +229,14 @@ err = filp->f_owner.pid; break; case F_SETOWN: + lock_kernel(); filp->f_owner.pid = arg; filp->f_owner.uid = current->uid; filp->f_owner.euid = current->euid; + err = 0; if (S_ISSOCK (filp->f_dentry->d_inode->i_mode)) err = sock_fcntl (filp, F_SETOWN, arg); + unlock_kernel(); break; case F_GETSIG: err = filp->f_owner.signum; @@ -238,21 +244,19 @@ case F_SETSIG: /* arg == 0 restores default behaviour. */ if (arg < 0 || arg > _NSIG) { - err = -EINVAL; break; } err = 0; filp->f_owner.signum = arg; break; - default: - /* sockets need a few special fcntls. 
*/ - err = -EINVAL; - if (S_ISSOCK (filp->f_dentry->d_inode->i_mode)) - err = sock_fcntl (filp, cmd, arg); + case F_GETLEASE: + err = fcntl_getlease(fd); + break; + case F_SETLEASE: + err = fcntl_setlease(fd, arg); break; } fput(filp); - unlock_kernel(); out: return err; } diff -urNX ../exclude linux-2.3.99pre8-linus/fs/lockd/clntlock.c linux-2.3.99pre8+lock/fs/lockd/clntlock.c --- linux-2.3.99pre8-linus/fs/lockd/clntlock.c Tue Apr 25 02:13:25 2000 +++ linux-2.3.99pre8+lock/fs/lockd/clntlock.c Mon May 15 08:31:52 2000 @@ -157,33 +157,38 @@ } } +/* lockd is way too incestuous with fs/locks.c */ + static int reclaimer(void *ptr) { struct nlm_host *host = (struct nlm_host *) ptr; struct nlm_wait *block; - struct file_lock *fl; - struct inode *inode; + struct list_head *tmp; /* This one ensures that our parent doesn't terminate while the * reclaim is in progress */ lock_kernel(); lockd_up(); + down(&file_lock_sem); /* First, reclaim all locks that have been granted previously. */ - do { - for (fl = file_lock_table; fl; fl = fl->fl_nextlink) { - inode = fl->fl_file->f_dentry->d_inode; - if (inode->i_sb->s_magic == NFS_SUPER_MAGIC - && nlm_cmp_addr(NFS_ADDR(inode), &host->h_addr) - && fl->fl_u.nfs_fl.state != host->h_state - && (fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) { - fl->fl_u.nfs_fl.flags &= ~ NFS_LCK_GRANTED; - nlmclnt_reclaim(host, fl); - break; - } +restart: + tmp = file_lock_list.next; + while (tmp != &file_lock_list) { + struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); + struct inode *inode = fl->fl_file->f_dentry->d_inode; + if (inode->i_sb->s_magic == NFS_SUPER_MAGIC && + nlm_cmp_addr(NFS_ADDR(inode), &host->h_addr) && + fl->fl_u.nfs_fl.state != host->h_state && + (fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) { + fl->fl_u.nfs_fl.flags &= ~ NFS_LCK_GRANTED; + nlmclnt_reclaim(host, fl); + goto restart; } - } while (fl); + tmp = tmp->next; + } + up(&file_lock_sem); host->h_reclaiming = 0; wake_up(&host->h_gracewait); diff -urNX ../exclude 
linux-2.3.99pre8-linus/fs/lockd/svclock.c linux-2.3.99pre8+lock/fs/lockd/svclock.c --- linux-2.3.99pre8-linus/fs/lockd/svclock.c Tue Apr 25 02:13:26 2000 +++ linux-2.3.99pre8+lock/fs/lockd/svclock.c Mon May 15 08:31:52 2000 @@ -347,7 +347,7 @@ /* Append to list of blocked */ nlmsvc_insert_block(block, NLM_NEVER); - if (!block->b_call.a_args.lock.fl.fl_prevblock) { + if (!list_empty(&block->b_call.a_args.lock.fl.fl_block)) { /* Now add block to block list of the conflicting lock if we haven't done so. */ dprintk("lockd: blocking on this lock.\n"); diff -urNX ../exclude linux-2.3.99pre8-linus/fs/locks.c linux-2.3.99pre8+lock/fs/locks.c --- linux-2.3.99pre8-linus/fs/locks.c Mon May 15 07:58:45 2000 +++ linux-2.3.99pre8+lock/fs/locks.c Sat May 20 04:40:16 2000 @@ -101,72 +101,231 @@ * Some adaptations for NFS support. * Olaf Kirch (okir@monad.swb.de), Dec 1996, * - * Fixed /proc/locks interface so that we can't overrun the buffer we are handed. + * Fixed /proc/locks interface so that we can't overrun the buffer we are + * handed. * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997. + * + * Added process limits to prevent attacks. + * Use slab allocator instead of kmalloc/kfree. + * Use generic list implementation from . + * Ensure the lock notifier method is always called when a lock is deleted. + * Sped up posix_locks_deadlock by only considering blocked locks. + * No longer require kernel_lock -- file_lock_sem protects us instead. + * Matthew Wilcox , March, 2000. 
*/ #include #include #include +#include +#include #include -static int flock_make_lock(struct file *filp, struct file_lock *fl, - unsigned int cmd); -static int posix_make_lock(struct file *filp, struct file_lock *fl, - struct flock *l); -static int flock_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl); -static int posix_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl); -static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl); -static int flock_lock_file(struct file *filp, struct file_lock *caller, - unsigned int wait); -static int posix_locks_deadlock(struct file_lock *caller, - struct file_lock *blocker); - -static struct file_lock *locks_empty_lock(void); -static struct file_lock *locks_init_lock(struct file_lock *, - struct file_lock *); -static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl); -static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait); -static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx); - -static void locks_insert_block(struct file_lock *blocker, struct file_lock *waiter); -static void locks_delete_block(struct file_lock *blocker, struct file_lock *waiter); -static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait); - -struct file_lock *file_lock_table = NULL; - -/* Allocate a new lock, and initialize its fields from fl. - * The lock is not inserted into any lists until locks_insert_lock() or - * locks_insert_block() are called. 
- */ -static inline struct file_lock *locks_alloc_lock(struct file_lock *fl) +DECLARE_MUTEX(file_lock_sem); + +#define acquire_fl_sem() do { \ +/* printk(KERN_DEBUG __FUNCTION__ ": acquiring file_lock_sem\n");*/\ + down(&file_lock_sem); \ +/* printk(KERN_DEBUG __FUNCTION__ ": acquired file_lock_sem\n");*/ \ +} while (0) + +#define release_fl_sem() up(&file_lock_sem) + +LIST_HEAD(file_lock_list); +static LIST_HEAD(blocked_list); + +static kmem_cache_t *filelock_cache; + +/* Allocate an empty lock structure. */ +static struct file_lock *locks_alloc_lock(int account) { - return locks_init_lock(locks_empty_lock(), fl); + struct file_lock *lock; + if (account && current->locks >= current->rlim[RLIMIT_LOCKS].rlim_cur) + return NULL; + lock = kmem_cache_alloc(filelock_cache, SLAB_KERNEL); + if (lock) { + current->locks += 1; + } + return lock; } -/* Free lock not inserted in any queue. - */ -static inline void locks_free_lock(struct file_lock *fl) +/* Free an unused lock */ +static void locks_free_lock(struct file_lock *fl) { + if (fl == NULL) { + BUG(); + return; + } + current->locks -= 1; if (waitqueue_active(&fl->fl_wait)) panic("Attempting to free lock with active wait queue"); - if (fl->fl_nextblock != NULL || fl->fl_prevblock != NULL) + if (!list_empty(&fl->fl_block)) panic("Attempting to free lock with active block list"); - - kfree(fl); - return; + + if (!list_empty(&fl->fl_link)) + panic("Attempting to free lock on active lock list"); + + kmem_cache_free(filelock_cache, fl); } -/* Check if two locks overlap each other. +/* + * Initialises the fields of the file lock which are invariant for + * free file_locks. 
*/ -static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) +static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) { - return ((fl1->fl_end >= fl2->fl_start) && - (fl2->fl_end >= fl1->fl_start)); + struct file_lock *lock = (struct file_lock *) foo; + + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) != + SLAB_CTOR_CONSTRUCTOR) + return; + + lock->fl_next = NULL; + INIT_LIST_HEAD(&lock->fl_link); + INIT_LIST_HEAD(&lock->fl_block); + init_waitqueue_head(&lock->fl_wait); +} + +/* Initialize a new lock from an existing file_lock structure. */ +static struct file_lock *locks_copy_lock(struct file_lock *new, + struct file_lock *fl) +{ + new->fl_owner = fl->fl_owner; + new->fl_pid = fl->fl_pid; + new->fl_file = fl->fl_file; + new->fl_flags = fl->fl_flags; + new->fl_type = fl->fl_type; + new->fl_start = fl->fl_start; + new->fl_end = fl->fl_end; + new->fl_notify = fl->fl_notify; + new->fl_insert = fl->fl_insert; + new->fl_remove = fl->fl_remove; + new->fl_u = fl->fl_u; + return new; +} + +static inline int flock_translate_cmd(int cmd) { + switch (cmd &~ LOCK_NB) { + case LOCK_SH: + return F_RDLCK; + case LOCK_EX: + return F_WRLCK; + case LOCK_UN: + return F_UNLCK; + } + return -EINVAL; +} + +/* Fill in a file_lock structure with an appropriate FLOCK lock. 
*/ +static struct file_lock *flock_make_lock(struct file *filp, unsigned int type) +{ + struct file_lock *fl = locks_alloc_lock(1); + if (fl == NULL) + return NULL; + + fl->fl_owner = NULL; + fl->fl_file = filp; + fl->fl_flags = FL_FLOCK; + fl->fl_type = type; + fl->fl_start = 0; + fl->fl_end = OFFSET_MAX; + fl->fl_notify = NULL; + fl->fl_insert = NULL; + fl->fl_remove = NULL; + + return fl; +} + +static int assign_type(struct file_lock *fl, int type) +{ + switch (type) { + case F_RDLCK: + case F_WRLCK: + case F_UNLCK: + fl->fl_type = type; + break; + default: + return 0; + } + return 1; +} + +/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX + * style lock. + */ +static int posix_make_lock(struct file *filp, struct file_lock *fl, + struct flock *l) +{ + loff_t start; + + switch (l->l_whence) { + case 0: /*SEEK_SET*/ + start = 0; + break; + case 1: /*SEEK_CUR*/ + start = filp->f_pos; + break; + case 2: /*SEEK_END*/ + start = filp->f_dentry->d_inode->i_size; + break; + default: + return 0; + } + + start += l->l_start; + if ((start < 0) || (l->l_len < 0)) + return 0; + fl->fl_start = start; /* we record the absolute position */ + + fl->fl_end = start + l->l_len - 1; + if (l->l_len == 0) + fl->fl_end = OFFSET_MAX; + + fl->fl_owner = current->files; + fl->fl_pid = current->pid; + fl->fl_file = filp; + fl->fl_flags = FL_POSIX; + fl->fl_notify = NULL; + fl->fl_insert = NULL; + fl->fl_remove = NULL; + + return assign_type(fl, l->l_type); +} + +/* Allocate a file_lock initialised to this type of lease */ +static struct file_lock *lease_alloc(struct file *filp, int type) +{ + struct file_lock *fl = locks_alloc_lock(1); + if (fl == NULL) + return NULL; + + fl->fl_owner = current->files; + fl->fl_pid = current->pid; + + fl->fl_file = filp; + fl->fl_flags = FL_LEASE; + if (assign_type(fl, type) == 0) + goto bad; + fl->fl_start = 0; + fl->fl_end = OFFSET_MAX; + fl->fl_notify = NULL; + fl->fl_insert = NULL; + fl->fl_remove = NULL; + + return fl; + +bad: + 
locks_free_lock(fl); + return NULL; +} + +/* Check if two locks overlap each other. */ +static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) +{ + return (fl1->fl_end >= fl2->fl_start) && + (fl2->fl_end >= fl1->fl_start); } /* @@ -174,86 +333,108 @@ * N.B. Do we need the test on PID as well as owner? * (Clone tasks should be considered as one "owner".) */ -static inline int -locks_same_owner(struct file_lock *fl1, struct file_lock *fl2) +static int locks_same_owner(struct file_lock *fl1, struct file_lock *fl2) { return (fl1->fl_owner == fl2->fl_owner) && (fl1->fl_pid == fl2->fl_pid); } -/* Insert waiter into blocker's block list. - * We use a circular list so that processes can be easily woken up in - * the order they blocked. The documentation doesn't require this but - * it seems like the reasonable thing to do. +/* Determine if lock sys_fl blocks lock caller_fl. Common functionality + * checks for overlapping locks and shared/exclusive status. */ -static void locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter) +static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { - struct file_lock *prevblock; + if (!locks_overlap(caller_fl, sys_fl)) + return 0; - if (waiter->fl_prevblock) { - printk(KERN_ERR "locks_insert_block: remove duplicated lock " - "(pid=%d %Ld-%Ld type=%d)\n", - waiter->fl_pid, (long long)waiter->fl_start, - (long long)waiter->fl_end, waiter->fl_type); - locks_delete_block(waiter->fl_prevblock, waiter); - } - - if (blocker->fl_prevblock == NULL) - /* No previous waiters - list is empty */ - prevblock = blocker; - else - /* Previous waiters exist - add to end of list */ - prevblock = blocker->fl_prevblock; - - prevblock->fl_nextblock = waiter; - blocker->fl_prevblock = waiter; - waiter->fl_nextblock = blocker; - waiter->fl_prevblock = prevblock; - - return; + switch (caller_fl->fl_type) { + case F_RDLCK: + return (sys_fl->fl_type == F_WRLCK); + + case F_WRLCK: + return 1; + + default: + 
printk("locks_conflict(): impossible lock type - %d\n", + caller_fl->fl_type); + break; + } + return 0; /* This should never happen */ } -/* Remove waiter from blocker's block list. - * When blocker ends up pointing to itself then the list is empty. +/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific + * checking before calling the locks_conflict(). */ -static void locks_delete_block(struct file_lock *blocker, - struct file_lock *waiter) +static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { - struct file_lock *nextblock; - struct file_lock *prevblock; - - nextblock = waiter->fl_nextblock; - prevblock = waiter->fl_prevblock; + /* POSIX locks owned by the same process do not conflict with + * each other. + */ + if (!(sys_fl->fl_flags & FL_POSIX) || + locks_same_owner(caller_fl, sys_fl)) + return 0; - if (nextblock == NULL) - return; - - nextblock->fl_prevblock = prevblock; - prevblock->fl_nextblock = nextblock; + return locks_conflict(caller_fl, sys_fl); +} + +/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific + * checking before calling the locks_conflict(). + */ +static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) +{ + /* FLOCK locks referring to the same filp do not conflict with + * each other. + */ + if (!(sys_fl->fl_flags & FL_FLOCK) || + (caller_fl->fl_file == sys_fl->fl_file)) + return 0; - waiter->fl_prevblock = waiter->fl_nextblock = NULL; - if (blocker->fl_nextblock == blocker) - /* No more locks on blocker's blocked list */ - blocker->fl_prevblock = blocker->fl_nextblock = NULL; - return; + return locks_conflict(caller_fl, sys_fl); } -/* The following two are for the benefit of lockd. +/* Insert file lock fl into an inode's lock list at the position indicated + * by pos. At the same time add the lock to the global file lock list. 
*/ -void -posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) +static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) { - locks_insert_block(blocker, waiter); - return; + list_add(&fl->fl_link, &file_lock_list); + + /* insert into file's list */ + fl->fl_next = *pos; + *pos = fl; + + if (fl->fl_insert) + fl->fl_insert(fl); } -void -posix_unblock_lock(struct file_lock *waiter) +/* Remove waiter from blocker's block list. + * When blocker ends up pointing to itself then the list is empty. + */ +static inline void locks_delete_block(struct file_lock *waiter) +{ + list_del(&waiter->fl_block); + INIT_LIST_HEAD(&waiter->fl_block); + list_del(&waiter->fl_link); + INIT_LIST_HEAD(&waiter->fl_link); +} + +/* Insert waiter into blocker's block list. + * We use a circular list so that processes can be easily woken up in + * the order they blocked. The documentation doesn't require this but + * it seems like the reasonable thing to do. + */ +static inline void locks_insert_block(struct file_lock *blocker, + struct file_lock *waiter) { - if (waiter->fl_prevblock) - locks_delete_block(waiter->fl_prevblock, waiter); - return; + if (!list_empty(&waiter->fl_block)) { + printk(KERN_ERR "locks_insert_block: removing duplicated lock " + "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid, + waiter->fl_start, waiter->fl_end, waiter->fl_type); + locks_delete_block(waiter); + } + list_add_tail(&waiter->fl_block, &blocker->fl_block); + list_add(&waiter->fl_link, &blocked_list); + waiter->fl_next = blocker; } /* Wake up processes blocked waiting for blocker. @@ -262,56 +443,132 @@ */ static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait) { - struct file_lock *waiter; - - while ((waiter = blocker->fl_nextblock) != NULL) { + while (!list_empty(&blocker->fl_block)) { + struct file_lock *waiter = list_entry(&blocker->fl_block, + struct file_lock, fl_block); /* N.B. Is it possible for the notify function to block?? 
*/ - if (waiter->fl_notify) + if (waiter->fl_notify) { + printk(KERN_EMERG "fl_notify = %p\n", waiter->fl_notify); waiter->fl_notify(waiter); + } else { + printk(KERN_EMERG "fl_notify = NULL\n"); + } wake_up(&waiter->fl_wait); if (wait) { /* Let the blocked process remove waiter from the * block list when it gets scheduled. */ +printk("locks_wake_up_blocks: scheduling\n"); current->policy |= SCHED_YIELD; schedule(); } else { /* Remove waiter from the block list, because by the * time it wakes up blocker won't exist any more. */ - locks_delete_block(blocker, waiter); + locks_delete_block(waiter); } } - return; } -/* flock() system call entry point. Apply a FL_FLOCK style lock to - * an open file descriptor. +/* Delete a lock and then free it. + * Remove our lock from the lock lists, wake up processes that are blocked + * waiting for this lock, notify the FS that the lock has been cleared and + * finally free the lock. */ -asmlinkage long sys_flock(unsigned int fd, unsigned int cmd) +static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait) { - struct file_lock file_lock; - struct file *filp; - int error; + int (*lock)(struct file *, int, struct file_lock *); + struct file_lock *fl = *thisfl_p; - lock_kernel(); - error = -EBADF; - filp = fget(fd); - if (!filp) - goto out; - error = -EINVAL; - if (!flock_make_lock(filp, &file_lock, cmd)) - goto out_putf; - error = -EBADF; - if ((file_lock.fl_type != F_UNLCK) && !(filp->f_mode & 3)) - goto out_putf; - error = flock_lock_file(filp, &file_lock, - (cmd & (LOCK_UN | LOCK_NB)) ? 
0 : 1); -out_putf: - fput(filp); -out: - unlock_kernel(); - return (error); + *thisfl_p = fl->fl_next; + fl->fl_next = NULL; + + list_del(&fl->fl_link); + INIT_LIST_HEAD(&fl->fl_link); + + if (fl->fl_remove) + fl->fl_remove(fl); + + locks_wake_up_blocks(fl, wait); + lock = fl->fl_file->f_op->lock; + if (lock) { + fl->fl_type = F_UNLCK; + lock(fl->fl_file, F_SETLK, fl); + } + locks_free_lock(fl); +} + +int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, struct semaphore *sem, int timeout) +{ + int result = 0; + wait_queue_t wait; + init_waitqueue_entry(&wait, current); + + __add_wait_queue(fl_wait, &wait); + current->state = TASK_INTERRUPTIBLE; + up(sem); + if (timeout == 0) { + schedule(); + } else { + result = schedule_timeout(timeout); + } + if (signal_pending(current)) { + result = -ERESTARTSYS; + } + down(sem); + remove_wait_queue(fl_wait, &wait); + current->state = TASK_RUNNING; + return result; +} + +static int locks_block_on(struct file_lock *blocker, struct file_lock *waiter) +{ + int result; + locks_insert_block(blocker, waiter); + result = interruptible_sleep_on_locked(&waiter->fl_wait, &file_lock_sem, 0); + locks_delete_block(waiter); + return result; +} + +/* This function tests for deadlock condition before putting a process to + * sleep. The detection scheme is no longer recursive. Recursive was neat, + * but dangerous - we risked stack corruption if the lock data was bad, or + * if the recursion was too deep for any other reason. + * + * We rely on the fact that a task can only be on one lock's wait queue + * at a time. When we find blocked_task on a wait queue we can re-search + * with blocked_task equal to that queue's owner, until either blocked_task + * isn't found, or blocked_task is found on a queue owned by my_task. + * + * Note: the above assumption may not be true when handling lock requests + * from a broken NFS client. But broken NFS clients have a lot more to + * worry about than proper deadlock detection anyway... 
--okir + */ +static int posix_locks_deadlock(struct file_lock *caller_fl, + struct file_lock *block_fl) +{ + struct file_lock *fl = block_fl; + struct list_head *tmp = blocked_list.next; + fl_owner_t caller_owner, blocked_owner; + unsigned int caller_pid, blocked_pid; + + caller_owner = caller_fl->fl_owner; + caller_pid = caller_fl->fl_pid; + +next_task: + blocked_owner = fl->fl_owner; + blocked_pid = fl->fl_pid; + if (caller_owner == blocked_owner && caller_pid == blocked_pid) + return 1; + while (tmp != &blocked_list) { + fl = list_entry(tmp, struct file_lock, fl_link); + tmp = tmp->next; + if (fl->fl_owner != blocked_owner || fl->fl_pid != blocked_pid) + continue; + fl = fl->fl_next; + goto next_task; + } + return 0; } /* Report the first existing lock that would conflict with l. @@ -320,7 +577,7 @@ int fcntl_getlk(unsigned int fd, struct flock *l) { struct file *filp; - struct file_lock *fl,file_lock; + struct file_lock *fl, file_lock; struct flock flock; int error; @@ -343,11 +600,13 @@ error = filp->f_op->lock(filp, F_GETLK, &file_lock); if (error < 0) goto out_putf; - else if (error == LOCK_USE_CLNT) - /* Bypass for NFS with no locking - 2.0.36 compat */ - fl = posix_test_lock(filp, &file_lock); - else - fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); + + if (error == LOCK_USE_CLNT) { + /* Bypass for NFS with no locking - 2.0.36 compat */ + fl = posix_test_lock(filp, &file_lock); + } else { + fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); + } } else { fl = posix_test_lock(filp, &file_lock); } @@ -377,7 +636,7 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l) { struct file *filp; - struct file_lock file_lock; + struct file_lock *fl = locks_alloc_lock(0); struct flock flock; struct inode *inode; int error; @@ -389,8 +648,7 @@ if (copy_from_user(&flock, l, sizeof(flock))) goto out; - /* Get arguments and validate them ... - */ + /* Get arguments and validate them ... 
*/ error = -EBADF; filp = fget(fd); @@ -419,7 +677,7 @@ } error = -EINVAL; - if (!posix_make_lock(filp, &file_lock, &flock)) + if (!posix_make_lock(filp, fl, &flock)) goto out_putf; error = -EBADF; @@ -442,8 +700,8 @@ static int count = 0; if (!count) { count=1; - printk(KERN_WARNING - "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n", + printk(KERN_WARNING "fcntl_setlk() called by process %d (%s) " + "with broken flock() emulation\n", current->pid, current->comm); } } @@ -457,409 +715,187 @@ } if (filp->f_op->lock != NULL) { - error = filp->f_op->lock(filp, cmd, &file_lock); + error = filp->f_op->lock(filp, cmd, fl); if (error < 0) goto out_putf; } - error = posix_lock_file(filp, &file_lock, cmd == F_SETLKW); + error = posix_lock_file(filp, fl, cmd == F_SETLKW); out_putf: fput(filp); out: + locks_free_lock(fl); return error; } /* * This function is called when the file is being removed - * from the task's fd array. + * from the task's fd array. For POSIX locks we free all + * locks on this file for the given task. */ void locks_remove_posix(struct file *filp, fl_owner_t owner) { - struct inode * inode = filp->f_dentry->d_inode; - struct file_lock file_lock, *fl; - struct file_lock **before; - - /* - * For POSIX locks we free all locks on this file for the given task. - */ -repeat: + struct inode *inode = filp->f_dentry->d_inode; + struct file_lock *fl, **before; + acquire_fl_sem(); before = &inode->i_flock; while ((fl = *before) != NULL) { - if ((fl->fl_flags & FL_POSIX) && fl->fl_owner == owner) { - int (*lock)(struct file *, int, struct file_lock *); - lock = filp->f_op->lock; - if (lock) { - file_lock = *fl; - file_lock.fl_type = F_UNLCK; - } - locks_delete_lock(before, 0); - if (lock) { - lock(filp, F_SETLK, &file_lock); - /* List may have changed: */ - goto repeat; - } - continue; - } - before = &fl->fl_next; - } -} - -/* - * This function is called on the last close of an open file. 
- */ -void locks_remove_flock(struct file *filp) -{ - struct inode * inode = filp->f_dentry->d_inode; - struct file_lock file_lock, *fl; - struct file_lock **before; - -repeat: - before = &inode->i_flock; - while ((fl = *before) != NULL) { - if ((fl->fl_flags & FL_FLOCK) && fl->fl_file == filp) { - int (*lock)(struct file *, int, struct file_lock *); - lock = NULL; - if (filp->f_op) - lock = filp->f_op->lock; - if (lock) { - file_lock = *fl; - file_lock.fl_type = F_UNLCK; - } - locks_delete_lock(before, 0); - if (lock) { - lock(filp, F_SETLK, &file_lock); - /* List may have changed: */ - goto repeat; - } - continue; - } - before = &fl->fl_next; - } -} - -struct file_lock * -posix_test_lock(struct file *filp, struct file_lock *fl) -{ - struct file_lock *cfl; - - for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { - if (!(cfl->fl_flags & FL_POSIX)) - continue; - if (posix_locks_conflict(cfl, fl)) - break; - } - - return (cfl); -} - -int locks_mandatory_locked(struct inode *inode) -{ - fl_owner_t owner = current->files; - struct file_lock *fl; - - /* - * Search the lock list for this inode for any POSIX locks. - */ - lock_kernel(); - for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { - if (!(fl->fl_flags & FL_POSIX)) - continue; - if (fl->fl_owner != owner) - break; - } - unlock_kernel(); - return fl ? -EAGAIN : 0; -} - -int locks_mandatory_area(int read_write, struct inode *inode, - struct file *filp, loff_t offset, - size_t count) -{ - struct file_lock *fl; - struct file_lock tfl; - int error; - - memset(&tfl, 0, sizeof(tfl)); - - tfl.fl_file = filp; - tfl.fl_flags = FL_POSIX | FL_ACCESS; - tfl.fl_owner = current->files; - tfl.fl_pid = current->pid; - init_waitqueue_head(&tfl.fl_wait); - tfl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? 
F_WRLCK : F_RDLCK; - tfl.fl_start = offset; - tfl.fl_end = offset + count - 1; - - error = 0; - lock_kernel(); - -repeat: - /* Search the lock list for this inode for locks that conflict with - * the proposed read/write. - */ - for (fl = inode->i_flock; ; fl = fl->fl_next) { - error = 0; - if (!fl) - break; - if (!(fl->fl_flags & FL_POSIX)) - continue; - /* Block for writes against a "read" lock, - * and both reads and writes against a "write" lock. - */ - if (posix_locks_conflict(&tfl, fl)) { - error = -EAGAIN; - if (filp && (filp->f_flags & O_NONBLOCK)) - break; - error = -ERESTARTSYS; - if (signal_pending(current)) - break; - error = -EDEADLK; - if (posix_locks_deadlock(&tfl, fl)) - break; - - locks_insert_block(fl, &tfl); - interruptible_sleep_on(&tfl.fl_wait); - locks_delete_block(fl, &tfl); - - /* - * If we've been sleeping someone might have - * changed the permissions behind our back. - */ - if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID) - break; - goto repeat; - } - } - unlock_kernel(); - return error; -} - -/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX - * style lock. 
- */ -static int posix_make_lock(struct file *filp, struct file_lock *fl, - struct flock *l) -{ - loff_t start; - - memset(fl, 0, sizeof(*fl)); - - init_waitqueue_head(&fl->fl_wait); - fl->fl_flags = FL_POSIX; - - switch (l->l_type) { - case F_RDLCK: - case F_WRLCK: - case F_UNLCK: - fl->fl_type = l->l_type; - break; - default: - return (0); - } - - switch (l->l_whence) { - case 0: /*SEEK_SET*/ - start = 0; - break; - case 1: /*SEEK_CUR*/ - start = filp->f_pos; - break; - case 2: /*SEEK_END*/ - start = filp->f_dentry->d_inode->i_size; - break; - default: - return (0); - } - - if (((start += l->l_start) < 0) || (l->l_len < 0)) - return (0); - fl->fl_end = start + l->l_len - 1; - if (l->l_len > 0 && fl->fl_end < 0) - return (0); - fl->fl_start = start; /* we record the absolute position */ - if (l->l_len == 0) - fl->fl_end = OFFSET_MAX; - - fl->fl_file = filp; - fl->fl_owner = current->files; - fl->fl_pid = current->pid; - - return (1); -} - -/* Verify a call to flock() and fill in a file_lock structure with - * an appropriate FLOCK lock. - */ -static int flock_make_lock(struct file *filp, struct file_lock *fl, - unsigned int cmd) -{ - memset(fl, 0, sizeof(*fl)); - - init_waitqueue_head(&fl->fl_wait); - - switch (cmd & ~LOCK_NB) { - case LOCK_SH: - fl->fl_type = F_RDLCK; - break; - case LOCK_EX: - fl->fl_type = F_WRLCK; - break; - case LOCK_UN: - fl->fl_type = F_UNLCK; - break; - default: - return (0); + if ((fl->fl_flags & FL_POSIX) && fl->fl_owner == owner) { + locks_delete_lock(before, 0); + } + before = &fl->fl_next; } - - fl->fl_flags = FL_FLOCK; - fl->fl_start = 0; - fl->fl_end = OFFSET_MAX; - fl->fl_file = filp; - fl->fl_owner = NULL; - - return (1); + release_fl_sem(); } -/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific - * checking before calling the locks_conflict(). +/* + * This function is called on the last close of an open file. 
*/ -static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) +void locks_remove_flock(struct file *filp) { - /* POSIX locks owned by the same process do not conflict with - * each other. - */ - if (!(sys_fl->fl_flags & FL_POSIX) || - locks_same_owner(caller_fl, sys_fl)) - return (0); + struct inode * inode = filp->f_dentry->d_inode; + struct file_lock *fl, **before; - return (locks_conflict(caller_fl, sys_fl)); + acquire_fl_sem(); + before = &inode->i_flock; + while ((fl = *before) != NULL) { + if ((fl->fl_flags & FL_FLOCK) && fl->fl_file == filp) { + locks_delete_lock(before, 0); + } + before = &fl->fl_next; + } + release_fl_sem(); } -/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific - * checking before calling the locks_conflict(). - */ -static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) +struct file_lock * +posix_test_lock(struct file *filp, struct file_lock *fl) { - /* FLOCK locks referring to the same filp do not conflict with - * each other. - */ - if (!(sys_fl->fl_flags & FL_FLOCK) || - (caller_fl->fl_file == sys_fl->fl_file)) - return (0); + struct file_lock *cfl; + + acquire_fl_sem(); + for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) { + if (!(cfl->fl_flags & FL_POSIX)) + continue; + if (posix_locks_conflict(cfl, fl)) + break; + } + release_fl_sem(); - return (locks_conflict(caller_fl, sys_fl)); + return cfl; } -/* Determine if lock sys_fl blocks lock caller_fl. Common functionality - * checks for overlapping locks and shared/exclusive status. 
- */ -static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) +int locks_mandatory_locked(struct inode *inode) { - if (!locks_overlap(caller_fl, sys_fl)) - return (0); - - switch (caller_fl->fl_type) { - case F_RDLCK: - return (sys_fl->fl_type == F_WRLCK); - - case F_WRLCK: - return (1); + fl_owner_t owner = current->files; + struct file_lock *fl; - default: - printk("locks_conflict(): impossible lock type - %d\n", - caller_fl->fl_type); - break; + /* + * Search the lock list for this inode for any POSIX locks. + */ + acquire_fl_sem(); + for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { + if (!(fl->fl_flags & FL_POSIX)) + continue; + if (fl->fl_owner != owner) + break; } - return (0); /* This should never happen */ + release_fl_sem(); + return fl ? -EAGAIN : 0; } -/* This function tests for deadlock condition before putting a process to - * sleep. The detection scheme is no longer recursive. Recursive was neat, - * but dangerous - we risked stack corruption if the lock data was bad, or - * if the recursion was too deep for any other reason. - * - * We rely on the fact that a task can only be on one lock's wait queue - * at a time. When we find blocked_task on a wait queue we can re-search - * with blocked_task equal to that queue's owner, until either blocked_task - * isn't found, or blocked_task is found on a queue owned by my_task. - * - * Note: the above assumption may not be true when handling lock requests - * from a broken NFS client. But broken NFS clients have a lot more to - * worry about than proper deadlock detection anyway... 
--okir - */ -static int posix_locks_deadlock(struct file_lock *caller_fl, - struct file_lock *block_fl) +int locks_mandatory_area(int read_write, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) { struct file_lock *fl; - struct file_lock *bfl; - void *caller_owner, *blocked_owner; - unsigned int caller_pid, blocked_pid; + struct file_lock *tfl = locks_alloc_lock(0); + int error; - caller_owner = caller_fl->fl_owner; - caller_pid = caller_fl->fl_pid; - blocked_owner = block_fl->fl_owner; - blocked_pid = block_fl->fl_pid; + tfl->fl_owner = current->files; + tfl->fl_pid = current->pid; + tfl->fl_file = filp; + tfl->fl_flags = FL_POSIX | FL_ACCESS; + tfl->fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK; + tfl->fl_start = offset; + tfl->fl_end = offset + count - 1; -next_task: - if (caller_owner == blocked_owner && caller_pid == blocked_pid) - return (1); - for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) { - if (fl->fl_owner == NULL || fl->fl_nextblock == NULL) + error = 0; + acquire_fl_sem(); + + /* Search the lock list for this inode for locks that conflict with + * the proposed read/write. + */ + for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { + if (!(fl->fl_flags & FL_POSIX)) continue; - for (bfl = fl->fl_nextblock; bfl != fl; bfl = bfl->fl_nextblock) { - if (bfl->fl_owner == blocked_owner && - bfl->fl_pid == blocked_pid) { - if (fl->fl_owner == caller_owner && - fl->fl_pid == caller_pid) { - return (1); - } - blocked_owner = fl->fl_owner; - blocked_pid = fl->fl_pid; - goto next_task; - } - } + if (fl->fl_start > tfl->fl_end) + break; + error = 0; + if (!posix_locks_conflict(tfl, fl)) + continue; + + error = -EAGAIN; + if (filp && (filp->f_flags & O_NONBLOCK)) + break; + error = -EDEADLK; + if (posix_locks_deadlock(tfl, fl)) + break; + + error = locks_block_on(fl, tfl); + if (error != 0) + break; + + /* + * If we've been sleeping someone might have + * changed the permissions behind our back. 
+ */ + if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID) + break; + fl = inode->i_flock; } - return (0); + release_fl_sem(); + locks_free_lock(tfl); + return error; } -/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks at - * the head of the list, but that's secret knowledge known only to the next - * two functions. +/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks + * at the head of the list, but that's secret knowledge known only to + * flock_lock_file and posix_lock_file. */ -static int flock_lock_file(struct file *filp, struct file_lock *caller, +static int flock_lock_file(struct file *filp, unsigned int lock_type, unsigned int wait) { struct file_lock *fl; struct file_lock *new_fl = NULL; struct file_lock **before; - struct inode * inode = filp->f_dentry->d_inode; + struct inode *inode = filp->f_dentry->d_inode; int error, change; - int unlock = (caller->fl_type == F_UNLCK); + int unlock = (lock_type == F_UNLCK); - /* - * If we need a new lock, get it in advance to avoid races. - */ if (!unlock) { error = -ENOLCK; - new_fl = locks_alloc_lock(caller); - if (!new_fl) - goto out; + new_fl = flock_make_lock(filp, lock_type); + if (new_fl == NULL) + return error; } error = 0; search: change = 0; before = &inode->i_flock; - while (((fl = *before) != NULL) && (fl->fl_flags & FL_FLOCK)) { - if (caller->fl_file == fl->fl_file) { - if (caller->fl_type == fl->fl_type) + for (;;) { + fl = *before; + if ((fl == NULL) || ((fl->fl_flags & FL_FLOCK) == 0)) + break; + if (filp == fl->fl_file) { + if (lock_type == fl->fl_type) goto out; change = 1; break; } before = &fl->fl_next; } - /* change means that we are changing the type of an existing lock, or + /* change means that we are changing the type of an existing lock, * or else unlocking it. 
*/ if (change) { @@ -874,11 +910,6 @@ if (unlock) goto out; -repeat: - /* Check signals each time we start */ - error = -ERESTARTSYS; - if (signal_pending(current)) - goto out; for (fl = inode->i_flock; (fl != NULL) && (fl->fl_flags & FL_FLOCK); fl = fl->fl_next) { if (!flock_locks_conflict(new_fl, fl)) @@ -886,10 +917,12 @@ error = -EAGAIN; if (!wait) goto out; - locks_insert_block(fl, new_fl); - interruptible_sleep_on(&new_fl->fl_wait); - locks_delete_block(fl, new_fl); - goto repeat; + + error = locks_block_on(fl, new_fl); + if (error != 0) + goto out; + + fl = inode->i_flock; } locks_insert_lock(&inode->i_flock, new_fl); new_fl = NULL; @@ -901,7 +934,13 @@ return error; } -/* Add a POSIX style lock to a file. +/** + * posix_lock_file: + * @filp: The file to apply the lock to + * @caller: The lock to be applied + * @wait: 1 to retry automatically, 0 to return -EAGAIN + * + * Add a POSIX style lock to a file. * We merge adjacent locks whenever possible. POSIX locks are sorted by owner * task, then by starting address * @@ -924,18 +963,21 @@ struct inode * inode = filp->f_dentry->d_inode; int error, added = 0; + if (caller->fl_notify != NULL) + printk("notify = %p\n", caller->fl_notify); + + acquire_fl_sem(); /* * We may need two file_lock structures for this operation, * so we get them in advance to avoid races. 
*/ - new_fl = locks_empty_lock(); - new_fl2 = locks_empty_lock(); + new_fl = locks_alloc_lock(0); + new_fl2 = locks_alloc_lock(0); error = -ENOLCK; /* "no luck" */ if (!(new_fl && new_fl2)) goto out; if (caller->fl_type != F_UNLCK) { - repeat: for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & FL_POSIX)) continue; @@ -947,13 +989,12 @@ error = -EDEADLK; if (posix_locks_deadlock(caller, fl)) goto out; - error = -ERESTARTSYS; - if (signal_pending(current)) + + error = locks_block_on(fl, caller); + if (error != 0) goto out; - locks_insert_block(fl, caller); - interruptible_sleep_on(&caller->fl_wait); - locks_delete_block(fl, caller); - goto repeat; + + fl = inode->i_flock; } } @@ -972,7 +1013,6 @@ !locks_same_owner(caller, fl))) { before = &fl->fl_next; } - /* Process locks with this owner. */ while ((fl = *before) && locks_same_owner(caller, fl)) { @@ -992,22 +1032,23 @@ * lock yielding from the lower start address of both * locks to the higher end address. */ - if (fl->fl_start > caller->fl_start) + if (fl->fl_start > caller->fl_start) { fl->fl_start = caller->fl_start; - else + } else { caller->fl_start = fl->fl_start; - if (fl->fl_end < caller->fl_end) + } + if (fl->fl_end < caller->fl_end) { fl->fl_end = caller->fl_end; - else + } else { caller->fl_end = fl->fl_end; + } if (added) { locks_delete_lock(before, 0); continue; } caller = fl; added = 1; - } - else { + } else { /* Processing for different lock types is a bit * more complex. */ @@ -1048,9 +1089,8 @@ added = 1; } } - /* Go on to next lock. - */ - next_lock: + /* Go on to next lock. */ +next_lock: before = &fl->fl_next; } @@ -1058,7 +1098,7 @@ if (!added) { if (caller->fl_type == F_UNLCK) goto out; - locks_init_lock(new_fl, caller); + locks_copy_lock(new_fl, caller); locks_insert_lock(before, new_fl); new_fl = NULL; } @@ -1068,7 +1108,7 @@ * so we have to use the second new lock (in this * case, even F_UNLCK may fail!). 
*/ - left = locks_init_lock(new_fl2, right); + left = locks_copy_lock(new_fl2, right); locks_insert_lock(before, left); new_fl2 = NULL; } @@ -1080,106 +1120,211 @@ locks_wake_up_blocks(left, 0); } out: - /* - * Free any unused locks. (They haven't - * ever been used, so we use kfree().) - */ + release_fl_sem(); + /* Free any unused locks. */ if (new_fl) - kfree(new_fl); + locks_free_lock(new_fl); if (new_fl2) - kfree(new_fl2); + locks_free_lock(new_fl2); return error; } /* - * Allocate an empty lock structure. We can use GFP_KERNEL now that - * all allocations are done in advance. + * get_lease has checked there _is_ a lease on this */ -static struct file_lock *locks_empty_lock(void) +int __get_lease(struct inode *inode, unsigned int mode) { - /* Okay, let's make a new file_lock structure... */ - return ((struct file_lock *) kmalloc(sizeof(struct file_lock), - GFP_KERNEL)); + int error = 0, future; + struct file_lock *new_fl, *flock = inode->i_flock; + + if (flock->fl_owner == current->files) + return 0; + + acquire_fl_sem(); + + while (flock->fl_type & F_INPROGRESS) { + error = interruptible_sleep_on_locked(&flock->fl_wait, &file_lock_sem, 0); + if (error != 0) + goto out; + + flock = inode->i_flock; + if ((flock == NULL) || (flock->fl_flags != FL_LEASE)) + goto out; + } + + if (mode & FMODE_WRITE) { + /* If we want write access, we have to revoke any lease. */ + future = F_UNLCK | F_INPROGRESS; + } else if (flock->fl_type & F_WRLCK) { + /* Downgrade the exclusive lease to a read-only lease. */ + future = F_RDLCK | F_INPROGRESS; + } else { + /* the existing lease was read-only, so we can read too. */ + goto out; + } + + new_fl = flock; + do { + new_fl->fl_type = future; + new_fl = new_fl->fl_next; + } while (new_fl->fl_flags & FL_LEASE); + + inode_send_fasync(inode); + error = 30 * HZ; +restart: + error = interruptible_sleep_on_locked(&flock->fl_wait, &file_lock_sem, error); + if (error < 0) + goto out; + + if (error == 0) { + /* We timed out. 
 Unilaterally break the lease. */
+		printk(KERN_DEBUG "lease timed out\n");
+		locks_delete_lock(&inode->i_flock, 0);
+	} else {
+		flock = inode->i_flock;
+		if (flock->fl_flags & FL_LEASE)
+			goto restart;
+		error = 0;
+	}
+
+out:
+	release_fl_sem();
+	return error;
 }
 
-/*
- * Initialize a new lock from an existing file_lock structure.
- */
-static struct file_lock *locks_init_lock(struct file_lock *new,
-					 struct file_lock *fl)
+int fcntl_getlease(unsigned int fd)
 {
-	if (new) {
-		memset(new, 0, sizeof(*new));
-		new->fl_owner = fl->fl_owner;
-		new->fl_pid = fl->fl_pid;
-		init_waitqueue_head(&new->fl_wait);
-		new->fl_file = fl->fl_file;
-		new->fl_flags = fl->fl_flags;
-		new->fl_type = fl->fl_type;
-		new->fl_start = fl->fl_start;
-		new->fl_end = fl->fl_end;
-		new->fl_notify = fl->fl_notify;
-		new->fl_insert = fl->fl_insert;
-		new->fl_remove = fl->fl_remove;
-		new->fl_u = fl->fl_u;
+	struct file_lock *fl;
+	struct file *filp;
+	int error;
+
+	error = -EBADF;
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	fl = filp->f_dentry->d_inode->i_flock;
+	if ((fl == NULL) || ((fl->fl_flags & FL_LEASE) == 0)) {
+		error = F_UNLCK;
+	} else {
+		error = fl->fl_type;
 	}
-	return new;
+	fput(filp);
+out:
+	return error;
 }
 
-/* Insert file lock fl into an inode's lock list at the position indicated
- * by pos. At the same time add the lock to the global file lock list.
- */
-static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+/* We already had a lease on this file; just change its type */
+static int lease_modify(struct file_lock **before, int arg, int fd, struct file *filp)
 {
-	fl->fl_nextlink = file_lock_table;
-	fl->fl_prevlink = NULL;
-	if (file_lock_table != NULL)
-		file_lock_table->fl_prevlink = fl;
-	file_lock_table = fl;
-	fl->fl_next = *pos;	/* insert into file's list */
-	*pos = fl;
+	if (assign_type(*before, arg) != 0)
+		return -EINVAL;
 
-	if (fl->fl_insert)
-		fl->fl_insert(fl);
+	wake_up(&(*before)->fl_wait);
 
-	return;
+	if (arg == F_UNLCK) {
+		locks_delete_lock(before, 0);
+		file_fasync(fd, filp, 0);
+	}
+	return 0;
 }
 
-/* Delete a lock and free it.
- * First remove our lock from the active lock lists. Then call
- * locks_wake_up_blocks() to wake up processes that are blocked
- * waiting for this lock. Finally free the lock structure.
- */
-static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
+int fcntl_setlease(unsigned int fd, long arg)
 {
-	struct file_lock *thisfl;
-	struct file_lock *prevfl;
-	struct file_lock *nextfl;
-
-	thisfl = *thisfl_p;
-	*thisfl_p = thisfl->fl_next;
+	struct file_lock *fl, *my_fl = NULL, **before, **my_before = NULL;
+	struct file *filp;
+	struct inode *inode;
+	int error, read_allowed = 1, write_allowed = 1;
+
+	error = -EBADF;
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	inode = filp->f_dentry->d_inode;
+	before = &inode->i_flock;
 
-	prevfl = thisfl->fl_prevlink;
-	nextfl = thisfl->fl_nextlink;
+	acquire_fl_sem();
 
-	if (nextfl != NULL)
-		nextfl->fl_prevlink = prevfl;
+	while ((fl = *before) != NULL) {
+		if (fl->fl_flags != FL_LEASE)
+			break;
+		if (fl->fl_owner == current->files) {
+			/* remember our own lease and where it is chained */
+			my_fl = fl;
+			my_before = before;
+		}
+		write_allowed = 0;
+		if ((fl->fl_type & F_RDLCK) == 0)
+			read_allowed = 0;
+		before = &fl->fl_next;
+	}
 
-	if (prevfl != NULL)
-		prevfl->fl_nextlink = nextfl;
-	else
-		file_lock_table = nextfl;
+	if (my_fl != NULL) {
+		error = lease_modify(my_before, arg, fd, filp);
+		goto out_unlock;
+	}
 
-	if (thisfl->fl_remove)
-		thisfl->fl_remove(thisfl);
+	fl = lease_alloc(filp, arg);
 
-	locks_wake_up_blocks(thisfl, wait);
-	locks_free_lock(thisfl);
+	error = -EINVAL;
+	if (fl == NULL)
+		goto out_unlock;
+
+	error = -EAGAIN;
+	if ((fl->fl_type == F_RDLCK && read_allowed)
+	    || (fl->fl_type == F_WRLCK && write_allowed))
+		goto out_freelock;
+
+	error = file_fasync(fd, filp, 1);
+	if (error < 0)
+		goto out_freelock;
+
+	fl->fl_next = *before;
+	*before = fl;
+
+out_unlock:
+	release_fl_sem();
+	fput(filp);
+out:
+	return error;
+
+out_freelock:
+	*before = fl->fl_next;
+	locks_free_lock(fl);
+	goto out_unlock;
+}
+
+/* flock() system call entry point.  Apply a FL_FLOCK style lock to
+ * an open file descriptor.
+ */
+asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
+{
+	struct file *filp;
+	int error, type;
+
+	error = -EBADF;
+	filp = fget(fd);
+	if (!filp)
+		goto out;
+
+	error = flock_translate_cmd(cmd);
+	if (error < 0)
+		goto out_putf;
+	type = error;
 
-	return;
+	error = -EBADF;
+	if ((type != F_UNLCK) && !(filp->f_mode & 3))
+		goto out_putf;
+
+	acquire_fl_sem();
+	error = flock_lock_file(filp, type,
+				(cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1);
+	release_fl_sem();
+out_putf:
+	fput(filp);
+out:
+	return error;
 }
 
-static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
+/* Functions to report lock usage in /proc/locks */
+
+static void lock_get_status(char * out, struct file_lock *fl, int id, char *pfx)
 {
 	struct inode *inode;
@@ -1192,21 +1337,20 @@
 		 (IS_MANDLOCK(inode) &&
 		  (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID)
 		 ? "MANDATORY" : "ADVISORY ");
-	}
-	else {
+	} else {
 		out += sprintf(out, "FLOCK ADVISORY ");
 	}
 	out += sprintf(out, "%s ", (fl->fl_type == F_RDLCK) ?
"READ " : "WRITE"); out += sprintf(out, "%d %s:%ld %Ld %Ld ", fl->fl_pid, kdevname(inode->i_dev), inode->i_ino, - (long long)fl->fl_start, (long long)fl->fl_end); + fl->fl_start, fl->fl_end); sprintf(out, "%08lx %08lx %08lx %08lx %08lx\n", - (long)fl, (long)fl->fl_prevlink, (long)fl->fl_nextlink, - (long)fl->fl_next, (long)fl->fl_nextblock); + (long)fl, (long)fl->fl_link.prev, (long)fl->fl_link.next, + (long)fl->fl_next, (long)fl->fl_block.next); } -static void move_lock_status(char **p, off_t* pos, off_t offset) +static void move_lock_status(char **p, off_t *pos, off_t offset) { int len; len = strlen(*p); @@ -1230,35 +1374,65 @@ int get_locks_status(char *buffer, char **start, off_t offset, int length) { - struct file_lock *fl; - struct file_lock *bfl; + struct list_head *tmp; char *q = buffer; off_t pos = 0; - int i; + int i = 0; - for (fl = file_lock_table, i = 1; fl != NULL; fl = fl->fl_nextlink, i++) { - lock_get_status(q, fl, i, ""); + acquire_fl_sem(); + tmp = file_lock_list.next; + while (tmp != &file_lock_list) { + struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); + struct list_head *btmp = fl->fl_block.next; + + lock_get_status(q, fl, ++i, ""); move_lock_status(&q, &pos, offset); - if(pos >= offset+length) + if (pos >= offset+length) goto done; - if ((bfl = fl->fl_nextblock) == NULL) - continue; - do { + while (btmp != &fl->fl_block) { + struct file_lock *bfl = list_entry(btmp, + struct file_lock, fl_block); lock_get_status(q, bfl, i, " ->"); move_lock_status(&q, &pos, offset); if(pos >= offset+length) goto done; - } while ((bfl = bfl->fl_nextblock) != fl); + btmp = btmp->next; + } + tmp = tmp->next; } done: + release_fl_sem(); *start = buffer; - if(q-buffer < length) + if (q - buffer < length) return (q-buffer); return length; } +/* The following two are for the benefit of lockd. 
+ */ +void +posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) +{ + acquire_fl_sem(); + locks_insert_block(blocker, waiter); + release_fl_sem(); +} +void +posix_unblock_lock(struct file_lock *waiter) +{ + acquire_fl_sem(); + locks_delete_block(waiter); + release_fl_sem(); +} +void __init filelock_init(void) +{ + filelock_cache = kmem_cache_create("file lock cache", + sizeof(struct file_lock), 0, 0, init_once, NULL); + if (!filelock_cache) + panic("cannot create file lock slab cache"); +} diff -urNX ../exclude linux-2.3.99pre8-linus/fs/namei.c linux-2.3.99pre8+lock/fs/namei.c --- linux-2.3.99pre8-linus/fs/namei.c Mon May 15 07:59:25 2000 +++ linux-2.3.99pre8+lock/fs/namei.c Sat May 20 04:42:06 2000 @@ -997,6 +1000,11 @@ goto exit; } + /* + * Ensure there are no outstanding leases on the file. + */ + get_lease(inode, flag); + if (flag & O_TRUNC) { error = get_write_access(inode); if (error) diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-alpha/resource.h linux-2.3.99pre8+lock/include/asm-alpha/resource.h --- linux-2.3.99pre8-linus/include/asm-alpha/resource.h Thu Dec 9 16:29:05 1999 +++ linux-2.3.99pre8+lock/include/asm-alpha/resource.h Mon May 15 08:31:52 2000 @@ -15,8 +15,9 @@ #define RLIMIT_AS 7 /* address space limit(?) */ #define RLIMIT_NPROC 8 /* max number of processes */ #define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 /* * SuS says limits have to be unsigned. 
Fine, it's unsigned, but @@ -39,6 +40,7 @@ {LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \ {LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \ {LONG_MAX, LONG_MAX}, /* RLIMIT_MEMLOCK */ \ + { 100, LONG_MAX}, /* RLIMIT_LOCKS */ \ } #endif /* __KERNEL__ */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-arm/resource.h linux-2.3.99pre8+lock/include/asm-arm/resource.h --- linux-2.3.99pre8-linus/include/asm-arm/resource.h Thu Dec 9 16:29:05 1999 +++ linux-2.3.99pre8+lock/include/asm-arm/resource.h Mon May 15 08:31:52 2000 @@ -15,8 +15,9 @@ #define RLIMIT_NOFILE 7 /* max number of open files */ #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ #define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 /* * SuS says limits have to be unsigned. @@ -38,6 +39,7 @@ { INR_OPEN, INR_OPEN }, \ { LONG_MAX, LONG_MAX }, \ { LONG_MAX, LONG_MAX }, \ + { 100, LONG_MAX}, /* RLIMIT_LOCKS */ \ } #endif /* __KERNEL__ */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-i386/fcntl.h linux-2.3.99pre8+lock/include/asm-i386/fcntl.h --- linux-2.3.99pre8-linus/include/asm-i386/fcntl.h Tue Dec 14 03:53:27 1999 +++ linux-2.3.99pre8+lock/include/asm-i386/fcntl.h Mon May 15 08:31:52 2000 @@ -34,6 +34,9 @@ #define F_GETOWN 9 /* for sockets. */ #define F_SETSIG 10 /* for sockets. */ #define F_GETSIG 11 /* for sockets. 
*/ +#define F_SETLEASE 12 +#define F_GETLEASE 13 + /* for F_[GET|SET]FL */ #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ @@ -46,6 +49,9 @@ /* for old implementation of bsd flock () */ #define F_EXLCK 4 /* or 3 */ #define F_SHLCK 8 /* or 4 */ + +/* for leases */ +#define F_INPROGRESS 16 /* operations for bsd flock(), also used by the kernel implementation */ #define LOCK_SH 1 /* shared lock */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-i386/resource.h linux-2.3.99pre8+lock/include/asm-i386/resource.h --- linux-2.3.99pre8-linus/include/asm-i386/resource.h Thu Dec 9 16:29:05 1999 +++ linux-2.3.99pre8+lock/include/asm-i386/resource.h Mon May 15 08:31:52 2000 @@ -15,8 +15,9 @@ #define RLIMIT_NOFILE 7 /* max number of open files */ #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ #define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 /* * SuS says limits have to be unsigned. @@ -38,6 +39,7 @@ { INR_OPEN, INR_OPEN }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ + { 100, RLIM_INFINITY }, /* RLIMIT_LOCKS */ \ } #endif /* __KERNEL__ */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-m68k/resource.h linux-2.3.99pre8+lock/include/asm-m68k/resource.h --- linux-2.3.99pre8-linus/include/asm-m68k/resource.h Thu Dec 9 16:29:06 1999 +++ linux-2.3.99pre8+lock/include/asm-m68k/resource.h Mon May 15 08:31:52 2000 @@ -15,8 +15,9 @@ #define RLIMIT_NOFILE 7 /* max number of open files */ #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space*/ #define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 /* * SuS says limits have to be unsigned. 
@@ -38,6 +39,7 @@
 	{INR_OPEN, INR_OPEN},		\
 	{LONG_MAX, LONG_MAX},		\
-	{LONG_MAX, LONG_MAX}		\
+	{LONG_MAX, LONG_MAX},		\
+	{ 100, LONG_MAX},	/* RLIMIT_LOCKS */ \
 }
 
 #endif /* __KERNEL__ */
diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-mips/resource.h linux-2.3.99pre8+lock/include/asm-mips/resource.h
--- linux-2.3.99pre8-linus/include/asm-mips/resource.h	Sun Mar 12 12:03:25 2000
+++ linux-2.3.99pre8+lock/include/asm-mips/resource.h	Mon May 15 08:31:52 2000
@@ -22,8 +22,9 @@
 #define RLIMIT_RSS 7		/* max resident set size */
 #define RLIMIT_NPROC 8		/* max number of processes */
 #define RLIMIT_MEMLOCK 9	/* max locked-in-memory address space */
+#define RLIMIT_LOCKS 10		/* maximum file locks held */
 
-#define RLIM_NLIMITS 10		/* Number of limit flavors. */
+#define RLIM_NLIMITS 11		/* Number of limit flavors. */
 
 /*
  * SuS says limits have to be unsigned.
@@ -45,6 +46,7 @@
 	{ RLIM_INFINITY, RLIM_INFINITY },	\
 	{ 0, 0 },				\
 	{ RLIM_INFINITY, RLIM_INFINITY },	\
+	{ 100, RLIM_INFINITY },			\
 }
 
 #endif /* __KERNEL__ */
diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-ppc/resource.h linux-2.3.99pre8+lock/include/asm-ppc/resource.h
--- linux-2.3.99pre8-linus/include/asm-ppc/resource.h	Mon May 15 07:59:26 2000
+++ linux-2.3.99pre8+lock/include/asm-ppc/resource.h	Mon May 15 08:31:52 2000
@@ -11,8 +11,9 @@
 #define RLIMIT_NOFILE 7		/* max number of open files */
 #define RLIMIT_MEMLOCK 8	/* max locked-in-memory address space */
 #define RLIMIT_AS 9		/* address space limit(?)
*/ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 #ifdef __KERNEL__ @@ -35,6 +36,7 @@ { INR_OPEN, INR_OPEN }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ + { 100, RLIM_INFINITY}, /* RLIMIT_LOCKS */ \ } #endif /* __KERNEL__ */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-sh/resource.h linux-2.3.99pre8+lock/include/asm-sh/resource.h --- linux-2.3.99pre8-linus/include/asm-sh/resource.h Sun Mar 12 12:03:33 2000 +++ linux-2.3.99pre8+lock/include/asm-sh/resource.h Mon May 15 08:31:52 2000 @@ -15,8 +15,9 @@ #define RLIMIT_NOFILE 7 /* max number of open files */ #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ #define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 #ifdef __KERNEL__ @@ -38,6 +39,7 @@ { INR_OPEN, INR_OPEN }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ { RLIM_INFINITY, RLIM_INFINITY }, \ + { 100, RLIM_INFINITY }, /* RLIMIT_LOCKS */ \ } #endif /* __KERNEL__ */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-sparc/resource.h linux-2.3.99pre8+lock/include/asm-sparc/resource.h --- linux-2.3.99pre8-linus/include/asm-sparc/resource.h Tue Dec 21 01:05:52 1999 +++ linux-2.3.99pre8+lock/include/asm-sparc/resource.h Mon May 15 08:31:52 2000 @@ -21,8 +21,9 @@ #define RLIMIT_NPROC 7 /* max number of processes */ #define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ #define RLIMIT_AS 9 /* address space limit */ +#define RLIMIT_LOCKS 10 /* maximum file locks held */ -#define RLIM_NLIMITS 10 +#define RLIM_NLIMITS 11 /* * SuS says limits have to be unsigned. 
@@ -43,6 +44,7 @@
 	{INR_OPEN, INR_OPEN}, {0, 0},		\
 	{RLIM_INFINITY, RLIM_INFINITY},		\
-	{RLIM_INFINITY, RLIM_INFINITY}		\
+	{RLIM_INFINITY, RLIM_INFINITY},		\
+	{ 100, RLIM_INFINITY},	/* RLIMIT_LOCKS */ \
 }
 
 #endif /* __KERNEL__ */
diff -urNX ../exclude linux-2.3.99pre8-linus/include/asm-sparc64/resource.h linux-2.3.99pre8+lock/include/asm-sparc64/resource.h
--- linux-2.3.99pre8-linus/include/asm-sparc64/resource.h	Tue Dec 21 01:05:52 1999
+++ linux-2.3.99pre8+lock/include/asm-sparc64/resource.h	Mon May 15 08:31:52 2000
@@ -21,8 +21,9 @@
 #define RLIMIT_NPROC 7		/* max number of processes */
 #define RLIMIT_MEMLOCK 8	/* max locked-in-memory address space */
 #define RLIMIT_AS 9		/* address space limit */
+#define RLIMIT_LOCKS 10		/* maximum file locks held */
 
-#define RLIM_NLIMITS 10
+#define RLIM_NLIMITS 11
 
 /*
  * SuS says limits have to be unsigned.
@@ -42,6 +43,7 @@
 	{INR_OPEN, INR_OPEN}, {0, 0},		\
 	{RLIM_INFINITY, RLIM_INFINITY},		\
-	{RLIM_INFINITY, RLIM_INFINITY}		\
+	{RLIM_INFINITY, RLIM_INFINITY},		\
+	{ 100, RLIM_INFINITY},	/* RLIMIT_LOCKS */ \
 }
 
 #endif /* __KERNEL__ */
diff -urNX ../exclude linux-2.3.99pre8-linus/include/linux/fs.h linux-2.3.99pre8+lock/include/linux/fs.h
--- linux-2.3.99pre8-linus/include/linux/fs.h	Mon May 15 07:59:26 2000
+++ linux-2.3.99pre8+lock/include/linux/fs.h	Sat May 20 04:32:48 2000
@@ -187,6 +187,7 @@
 extern void buffer_init(unsigned long);
 extern void inode_init(unsigned long);
 extern void file_table_init(void);
+extern void filelock_init(void);
 extern void dcache_init(unsigned long);
 
 /* bh state bits */
@@ -492,6 +494,7 @@
 #define FL_BROKEN	4	/* broken flock() emulation */
 #define FL_ACCESS	8	/* for processes suspended by mandatory locking */
 #define FL_LOCKD	16	/* lock held by rpc.lockd */
+#define FL_LEASE	32	/* lease held on this file */
 
 /*
  * The POSIX file lock owner is determined by
@@ -503,19 +506,17 @@
 typedef struct files_struct *fl_owner_t;
 
 struct file_lock {
-	struct file_lock *fl_next;	/* singly linked list for this inode */
-	struct file_lock *fl_nextlink;	/* doubly linked list of all locks */
-	struct
file_lock *fl_prevlink; /* used to simplify lock removal */ - struct file_lock *fl_nextblock; /* circular list of blocked processes */ - struct file_lock *fl_prevblock; - fl_owner_t fl_owner; - unsigned int fl_pid; - wait_queue_head_t fl_wait; - struct file *fl_file; - unsigned char fl_flags; - unsigned char fl_type; - loff_t fl_start; - loff_t fl_end; + struct file_lock * fl_next; /* Single list for this inode */ + struct list_head fl_link; /* Circular list of all locks */ + struct list_head fl_block; /* Circular list of blocked locks */ + fl_owner_t fl_owner; /* Thread group for POSIX locks */ + unsigned int fl_pid; + wait_queue_head_t fl_wait; /* Blocked locks wait on this */ + struct file * fl_file; /* File this lock belongs to */ + unsigned char fl_flags; /* See above for flag values */ + unsigned char fl_type; /* POSIX or BSD lock */ + loff_t fl_start; /* First byte locked */ + loff_t fl_end; /* Last byte locked */ void (*fl_notify)(struct file_lock *); /* unblock callback */ void (*fl_insert)(struct file_lock *); /* lock insertion callback */ @@ -532,20 +533,24 @@ #define OFFSET_MAX INT_LIMIT(loff_t) #endif -extern struct file_lock *file_lock_table; - #include extern int fcntl_getlk(unsigned int, struct flock *); extern int fcntl_setlk(unsigned int, unsigned int, struct flock *); /* fs/locks.c */ +extern struct semaphore file_lock_sem; +extern struct list_head file_lock_list; + extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); extern struct file_lock *posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, unsigned int); extern void posix_block_lock(struct file_lock *, struct file_lock *); extern void posix_unblock_lock(struct file_lock *); +extern int __get_lease(struct inode *inode, unsigned int flags); + +#define FASYNC_MAGIC 0x4601 struct fasync_struct { int magic; @@ -554,6 +559,16 @@ struct file *fa_file; }; +extern void 
__kill_fasync(struct fasync_struct *, int, int); +extern void kill_fasync(struct fasync_struct **, int, int); +extern int file_fasync(int, struct file *, int); +#define inode_send_fasync(inode) \ + kill_fasync(&inode->i_fasync, SIGIO, POLL_MSG) +#define tty_send_fasync(fa) \ + kill_fasync(fa, SIGIO, POLL_IN) + +extern int fasync_helper(int, struct file *, int, struct fasync_struct **); + struct nameidata { struct dentry *dentry; struct vfsmount *mnt; @@ -815,7 +826,7 @@ #define MANDATORY_LOCK(inode) \ (IS_MANDLOCK(inode) && ((inode)->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) -static inline int locks_verify_locked(struct inode *inode) +extern inline int locks_verify_locked(struct inode *inode) { if (MANDATORY_LOCK(inode)) return locks_mandatory_locked(inode); @@ -844,6 +855,12 @@ return 0; } +extern inline int get_lease(struct inode *inode, unsigned int mode) +{ + if (inode->i_flock && (inode->i_flock->fl_flags & FL_LEASE)) + return __get_lease(inode, mode); + return 0; +} /* fs/open.c */ diff -urNX ../exclude linux-2.3.99pre8-linus/include/linux/sched.h linux-2.3.99pre8+lock/include/linux/sched.h --- linux-2.3.99pre8-linus/include/linux/sched.h Mon May 15 07:59:26 2000 +++ linux-2.3.99pre8+lock/include/linux/sched.h Sat May 20 04:34:19 2000 @@ -325,6 +325,7 @@ /* file system info */ int link_count; struct tty_struct *tty; /* NULL if no tty */ + unsigned int locks; /* How many file locks are being held */ /* ipc stuff */ struct sem_undo *semundo; struct sem_queue *semsleeping; diff -urNX ../exclude linux-2.3.99pre8-linus/init/main.c linux-2.3.99pre8+lock/init/main.c --- linux-2.3.99pre8-linus/init/main.c Mon May 15 07:59:39 2000 +++ linux-2.3.99pre8+lock/init/main.c Sat May 20 00:56:08 2000 @@ -577,6 +578,8 @@ bdev_init(); inode_init(mempages); file_table_init(); + filelock_init(); + fasync_init(); #if defined(CONFIG_SYSVIPC) ipc_init(); #endif diff -urNX ../exclude linux-2.3.99pre8-linus/kernel/ksyms.c linux-2.3.99pre8+lock/kernel/ksyms.c --- 
linux-2.3.99pre8-linus/kernel/ksyms.c Mon May 15 07:59:26 2000 +++ linux-2.3.99pre8+lock/kernel/ksyms.c Mon May 15 08:31:53 2000 @@ -210,7 +210,8 @@ EXPORT_SYMBOL(generic_buffer_fdatasync); EXPORT_SYMBOL(page_hash_bits); EXPORT_SYMBOL(page_hash_table); -EXPORT_SYMBOL(file_lock_table); +EXPORT_SYMBOL(file_lock_list); +EXPORT_SYMBOL(file_lock_sem); EXPORT_SYMBOL(posix_lock_file); EXPORT_SYMBOL(posix_test_lock); EXPORT_SYMBOL(posix_block_lock);