From 5263e31e452fb84138b9bee061d5c06c0f359fea Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Fri, 16 Jan 2015 15:05:55 -0500
Subject: locks: move flock locks to file_lock_context

Signed-off-by: Jeff Layton
Acked-by: Christoph Hellwig
---
 fs/nfs/write.c | 43 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 5 deletions(-)

(limited to 'fs/nfs/write.c')

diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..e072aeb34195 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1113,6 +1113,11 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		do_flush |= l_ctx->lockowner.l_owner != current->files
 			|| l_ctx->lockowner.l_pid != current->tgid;
 	}
+	if (l_ctx && ctx->dentry->d_inode->i_flctx &&
+	    !list_empty_careful(&ctx->dentry->d_inode->i_flctx->flc_flock)) {
+		do_flush |= l_ctx->lockowner.l_owner != current->files
+			|| l_ctx->lockowner.l_pid != current->tgid;
+	}
 	nfs_release_request(req);
 	if (!do_flush)
 		return 0;
@@ -1170,6 +1175,13 @@ out:
 	return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+			fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1192,38 @@ out:
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+	int ret;
+	struct file_lock_context *flctx = inode->i_flctx;
+	struct file_lock *fl;
+
 	if (file->f_flags & O_DSYNC)
 		return 0;
 	if (!nfs_write_pageuptodate(page, inode))
 		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
-	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-			inode->i_flock->fl_end == OFFSET_MAX &&
-			inode->i_flock->fl_type != F_RDLCK))
-		return 1;
-	return 0;
+	if (!inode->i_flock && !flctx)
+		return 0;
+
+	/* Check to see if there are whole file write locks */
+	spin_lock(&inode->i_lock);
+	ret = 0;
+
+	fl = inode->i_flock;
+	if (fl && is_whole_file_wrlock(fl)) {
+		ret = 1;
+		goto out;
+	}
+
+	if (!list_empty(&flctx->flc_flock)) {
+		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+					fl_list);
+		if (fl->fl_type == F_WRLCK)
+			ret = 1;
+	}
+out:
+	spin_unlock(&inode->i_lock);
+	return ret;
 }
 
 /*
--
cgit v1.2.1


From bd61e0a9c852de2d705b6f1bb2cc54c5774db570 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Fri, 16 Jan 2015 15:05:55 -0500
Subject: locks: convert posix locks to file_lock_context

Signed-off-by: Jeff Layton
Acked-by: Christoph Hellwig
---
 fs/nfs/write.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

(limited to 'fs/nfs/write.c')

diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e072aeb34195..784c13485b3f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct nfs_lock_context *l_ctx;
+	struct file_lock_context *flctx = file_inode(file)->i_flctx;
 	struct nfs_page *req;
 	int do_flush, status;
 	/*
@@ -1109,12 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		do_flush = req->wb_page != page || req->wb_context != ctx;
 		/* for now, flush if more than 1 request in page_group */
 		do_flush |= req->wb_this_page != req;
-		if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
-			do_flush |= l_ctx->lockowner.l_owner != current->files
-				|| l_ctx->lockowner.l_pid != current->tgid;
-		}
-		if (l_ctx && ctx->dentry->d_inode->i_flctx &&
-		    !list_empty_careful(&ctx->dentry->d_inode->i_flctx->flc_flock)) {
+		if (l_ctx && flctx &&
+		    !(list_empty_careful(&flctx->flc_posix) &&
+		      list_empty_careful(&flctx->flc_flock))) {
 			do_flush |= l_ctx->lockowner.l_owner != current->files
 				|| l_ctx->lockowner.l_pid != current->tgid;
 		}
@@ -1202,26 +1200,24 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
 		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
-	if (!inode->i_flock && !flctx)
+	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+		       list_empty_careful(&flctx->flc_posix)))
 		return 0;
 
 	/* Check to see if there are whole file write locks */
-	spin_lock(&inode->i_lock);
 	ret = 0;
-
-	fl = inode->i_flock;
-	if (fl && is_whole_file_wrlock(fl)) {
-		ret = 1;
-		goto out;
-	}
-
-	if (!list_empty(&flctx->flc_flock)) {
+	spin_lock(&inode->i_lock);
+	if (!list_empty(&flctx->flc_posix)) {
+		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+					fl_list);
+		if (is_whole_file_wrlock(fl))
+			ret = 1;
+	} else if (!list_empty(&flctx->flc_flock)) {
 		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
 					fl_list);
 		if (fl->fl_type == F_WRLCK)
 			ret = 1;
 	}
-out:
 	spin_unlock(&inode->i_lock);
 	return ret;
 }
--
cgit v1.2.1


From 6109c85037e53443f29fd39c0de69f578a1cf285 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Fri, 16 Jan 2015 15:05:57 -0500
Subject: locks: add a dedicated spinlock to protect i_flctx lists

We can now add a dedicated spinlock without expanding struct inode.
Change to using that to protect the various i_flctx lists.

Signed-off-by: Jeff Layton
Acked-by: Christoph Hellwig
---
 fs/nfs/write.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'fs/nfs/write.c')

diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 784c13485b3f..4ae66f416eb9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1206,7 +1206,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
 
 	/* Check to see if there are whole file write locks */
 	ret = 0;
-	spin_lock(&inode->i_lock);
+	spin_lock(&flctx->flc_lock);
 	if (!list_empty(&flctx->flc_posix)) {
 		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
 					fl_list);
@@ -1218,7 +1218,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
 		if (fl->fl_type == F_WRLCK)
 			ret = 1;
 	}
-	spin_unlock(&inode->i_lock);
+	spin_unlock(&flctx->flc_lock);
 	return ret;
 }
--
cgit v1.2.1
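
Taken together, the three patches change how nfs_can_extend_write() inspects lock state: instead of looking at the head of inode->i_flock, it looks at the first entry of the per-inode file_lock_context lists (flc_posix first, then flc_flock), and after the last patch it does so under the context's own flc_lock rather than inode->i_lock. What follows is a minimal userspace sketch of that final pattern, not kernel code: the model_* and MODEL_* names are invented for illustration, a pthread mutex stands in for the flc_lock spinlock, and the kernel's list_head lists are reduced to plain "first lock" pointers.

/*
 * Minimal userspace model of the lock check left in nfs_can_extend_write()
 * by the series above. NOT kernel code: all model_* / MODEL_* identifiers
 * are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_OFFSET_MAX INT64_MAX	/* stand-in for the kernel's OFFSET_MAX */

enum model_lock_type { MODEL_RDLCK, MODEL_WRLCK };

struct model_file_lock {
	int64_t fl_start;
	int64_t fl_end;
	enum model_lock_type fl_type;
};

/* Per-inode lock context: one list per lock flavor, plus a dedicated lock. */
struct model_flctx {
	pthread_mutex_t flc_lock;		/* stands in for the flc_lock spinlock */
	struct model_file_lock *flc_posix;	/* first POSIX lock, NULL if none */
	struct model_file_lock *flc_flock;	/* first flock() lock, NULL if none */
};

static bool is_whole_file_wrlock(const struct model_file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == MODEL_OFFSET_MAX &&
	       fl->fl_type == MODEL_WRLCK;
}

/*
 * Mirrors the shape of the final check: examine the first POSIX lock if any,
 * otherwise the first flock lock, all under the context's own lock rather
 * than a per-inode lock.
 */
static int can_extend_write(struct model_flctx *flctx)
{
	int ret = 0;

	if (!flctx || (!flctx->flc_posix && !flctx->flc_flock))
		return 0;

	pthread_mutex_lock(&flctx->flc_lock);
	if (flctx->flc_posix)
		ret = is_whole_file_wrlock(flctx->flc_posix);
	else if (flctx->flc_flock)
		ret = flctx->flc_flock->fl_type == MODEL_WRLCK;
	pthread_mutex_unlock(&flctx->flc_lock);
	return ret;
}

int main(void)
{
	struct model_file_lock whole_file = {
		.fl_start = 0,
		.fl_end = MODEL_OFFSET_MAX,
		.fl_type = MODEL_WRLCK,
	};
	struct model_flctx flctx = {
		.flc_lock = PTHREAD_MUTEX_INITIALIZER,
		.flc_posix = &whole_file,
	};

	printf("whole-file write lock held: %d\n", can_extend_write(&flctx));
	return 0;
}

Like the patched kernel code, the model checks only the first entry of each list and treats a missing or empty lock context as "do not extend"; it is a sketch of the control flow after this series, not a drop-in replacement for the NFS logic.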