| | | |
|---|---|---|
| author | Dmitry Torokhov <dtor@insightbb.com> | 2006-12-08 01:07:56 -0500 |
| committer | Dmitry Torokhov <dtor@insightbb.com> | 2006-12-08 01:07:56 -0500 |
| commit | bef986502fa398b1785a3979b1aa17cd902d3527 (patch) | |
| tree | b59c1afe7b1dfcc001b86e54863f550d7ddc8c34 /fs | |
| parent | 4bdbd2807deeccc0793d57fb5120d7a53f2c0b3c (diff) | |
| parent | c99767974ebd2a719d849fdeaaa1674456f5283f (diff) | |
| download | blackbird-op-linux-bef986502fa398b1785a3979b1aa17cd902d3527.tar.gz blackbird-op-linux-bef986502fa398b1785a3979b1aa17cd902d3527.zip | |
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/hid.h
Diffstat (limited to 'fs')
320 files changed, 5365 insertions, 4124 deletions
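A recurring change throughout the diff below is the 2.6.20-era workqueue API conversion: `INIT_WORK()` loses its `data` argument, handlers take a `struct work_struct *` instead of `void *`, and each handler recovers its enclosing context with `container_of()` (see `v9fs_read_work`, `aio_kick_handler`, `bio_dirty_fn`). The following is a minimal userspace sketch of that `container_of()` idiom only; the struct and function names (`work_item`, `mux_data`, `read_work`) are hypothetical stand-ins, not the kernel's types, and the real `INIT_WORK`/`queue_work` machinery is not reproduced here.

```c
/*
 * Sketch of the container_of idiom used by the new-style work handlers:
 * the callback receives a pointer to the embedded member and recovers the
 * enclosing structure, instead of being handed an opaque void *data.
 * Names below are illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {             /* stands in for struct work_struct */
	void (*func)(struct work_item *);
};

struct mux_data {              /* stands in for e.g. struct v9fs_mux_data */
	int err;
	struct work_item rq;   /* embedded member, like m->rq */
};

static void read_work(struct work_item *work)
{
	/* new-style handler: derive the context from the member pointer */
	struct mux_data *m = container_of(work, struct mux_data, rq);
	printf("read_work: err=%d\n", m->err);
}

int main(void)
{
	struct mux_data m = { .err = 0, .rq = { .func = read_work } };
	m.rq.func(&m.rq);      /* roughly what the workqueue would invoke */
	return 0;
}
```

The other frequent edits in this merge are mechanical: `kmem_cache_t` becomes `struct kmem_cache`, `SLAB_KERNEL` allocations become `GFP_KERNEL`, `kmalloc()`+`memset()` pairs collapse into `kzalloc()`, and unchecked `__put_user`/`__get_user` calls gain `-EFAULT` handling.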
diff --git a/fs/9p/conv.c b/fs/9p/conv.c index 56d88c1a09c5..a3ed571eee31 100644 --- a/fs/9p/conv.c +++ b/fs/9p/conv.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/idr.h> #include <asm/uaccess.h> #include "debug.h" diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c index 8556097fcda8..dc336a67592f 100644 --- a/fs/9p/fcall.c +++ b/fs/9p/fcall.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/idr.h> #include "debug.h" diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 70492ccb4385..27507201f9e7 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/idr.h> #include "debug.h" diff --git a/fs/9p/mux.c b/fs/9p/mux.c index 90a79c784549..944273c3dbff 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c @@ -110,8 +110,8 @@ struct v9fs_mux_rpc { }; static int v9fs_poll_proc(void *); -static void v9fs_read_work(void *); -static void v9fs_write_work(void *); +static void v9fs_read_work(struct work_struct *work); +static void v9fs_write_work(struct work_struct *work); static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, poll_table * p); static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); @@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize, m->rbuf = NULL; m->wpos = m->wsize = 0; m->wbuf = NULL; - INIT_WORK(&m->rq, v9fs_read_work, m); - INIT_WORK(&m->wq, v9fs_write_work, m); + INIT_WORK(&m->rq, v9fs_read_work); + INIT_WORK(&m->wq, v9fs_write_work); m->wsched = 0; memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); m->poll_task = NULL; @@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a) /** * v9fs_write_work - called when a transport can send some data */ -static void v9fs_write_work(void *a) +static void v9fs_write_work(struct work_struct *work) { int n, err; struct v9fs_mux_data *m; struct v9fs_req *req; - m = a; + m = container_of(work, struct v9fs_mux_data, wq); if (m->err < 0) { clear_bit(Wworksched, &m->wsched); @@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) /** * v9fs_read_work - called when there is some data to be read from a transport */ -static void v9fs_read_work(void *a) +static void v9fs_read_work(struct work_struct *work) { int n, err; struct v9fs_mux_data *m; @@ -572,7 +572,7 @@ static void v9fs_read_work(void *a) struct v9fs_fcall *rcall; char *rbuf; - m = a; + m = container_of(work, struct v9fs_mux_data, rq); if (m->err < 0) return; diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 0f628041e3f7..0b96fae8b479 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/parser.h> #include <linux/idr.h> diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index e32d5971039b..905c882f4e2f 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -30,6 +30,7 @@ #include <linux/stat.h> #include <linux/string.h> #include <linux/smp_lock.h> +#include <linux/sched.h> #include <linux/inet.h> #include <linux/idr.h> diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index c3c47eda7574..79e6f9cd7340 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/file.h> #include <linux/stat.h> 
#include <linux/string.h> diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 5241c600ce28..18f26cdfd882 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -256,7 +256,7 @@ static int v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm, u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit) { - u32 fid; + int fid; int err; struct v9fs_fcall *fcall; @@ -310,7 +310,7 @@ static struct v9fs_fid* v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) { int err; - u32 nfid; + int nfid; struct v9fs_fid *ret; struct v9fs_fcall *fcall; diff --git a/fs/Kconfig b/fs/Kconfig index 133dcc8a4150..b3b5aa0edff9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -972,7 +972,7 @@ config SYSFS Some system agents rely on the information in sysfs to operate. /sbin/hotplug uses device and object attributes in sysfs to assist in - delegating policy decisions, like persistantly naming devices. + delegating policy decisions, like persistently naming devices. sysfs is currently used by the block subsystem to mount the root partition. If sysfs is disabled you must specify the boot device on @@ -1145,7 +1145,7 @@ config BEFS_FS help The BeOS File System (BeFS) is the native file system of Be, Inc's BeOS. Notable features include support for arbitrary attributes - on files and directories, and database-like indeces on selected + on files and directories, and database-like indices on selected attributes. (Also note that this driver doesn't make those features available at this time). It is a 64 bit filesystem, so it supports extremely large volumes and files. @@ -2060,8 +2060,7 @@ config CODA_FS_OLD_API For most cases you probably want to say N. config AFS_FS -# for fs/nls/Config.in - tristate "Andrew File System support (AFS) (Experimental)" + tristate "Andrew File System support (AFS) (EXPERIMENTAL)" depends on INET && EXPERIMENTAL select RXRPC help diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 9ade139086fc..5023351a7afe 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -36,7 +36,7 @@ void __adfs_error(struct super_block *sb, const char *function, const char *fmt, va_list args; va_start(args, fmt); - vsprintf(error_buf, fmt, args); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n", @@ -212,12 +212,12 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static kmem_cache_t *adfs_inode_cachep; +static struct kmem_cache *adfs_inode_cachep; static struct inode *adfs_alloc_inode(struct super_block *sb) { struct adfs_inode_info *ei; - ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL); + ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode) kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index ccd624ef4272..f4de4b98004f 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -445,7 +445,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...) 
va_list args; va_start(args,fmt); - vsprintf(ErrorBuffer,fmt,args); + vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); va_end(args); printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id, @@ -461,7 +461,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...) va_list args; va_start(args,fmt); - vsprintf(ErrorBuffer,fmt,args); + vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args); va_end(args); printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id, diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c index b0b953683c1a..b330009fe42d 100644 --- a/fs/affs/bitmap.c +++ b/fs/affs/bitmap.c @@ -289,12 +289,11 @@ int affs_init_bitmap(struct super_block *sb, int *flags) sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved + sbi->s_bmap_bits - 1) / sbi->s_bmap_bits; size = sbi->s_bmap_count * sizeof(*bm); - bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL); + bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL); if (!sbi->s_bitmap) { printk(KERN_ERR "AFFS: Bitmap allocation failed\n"); return -ENOMEM; } - memset(sbi->s_bitmap, 0, size); bmap_blk = (__be32 *)sbi->s_root_bh->b_data; blk = sb->s_blocksize / 4 - 49; diff --git a/fs/affs/super.c b/fs/affs/super.c index 5ea72c3a16c3..3de93e799949 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -66,12 +66,12 @@ affs_write_super(struct super_block *sb) pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean); } -static kmem_cache_t * affs_inode_cachep; +static struct kmem_cache * affs_inode_cachep; static struct inode *affs_alloc_inode(struct super_block *sb) { struct affs_inode_info *ei; - ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL); + ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode) kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c index f09a794f248e..615df2407cb2 100644 --- a/fs/afs/kafsasyncd.c +++ b/fs/afs/kafsasyncd.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/completion.h> +#include <linux/freezer.h> #include "cell.h" #include "server.h" #include "volume.h" diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c index 65bc05ab8182..694344e4d3c7 100644 --- a/fs/afs/kafstimod.c +++ b/fs/afs/kafstimod.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/completion.h> +#include <linux/freezer.h> #include "cell.h" #include "volume.h" #include "kafstimod.h" diff --git a/fs/afs/server.c b/fs/afs/server.c index 22afaae1a4ce..44aff81dc6a7 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -55,13 +55,12 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr, _enter("%p,%08x,", cell, ntohl(addr->s_addr)); /* allocate and initialise a server record */ - server = kmalloc(sizeof(struct afs_server), GFP_KERNEL); + server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); if (!server) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(server, 0, sizeof(struct afs_server)); atomic_set(&server->usage, 1); INIT_LIST_HEAD(&server->link); diff --git a/fs/afs/super.c b/fs/afs/super.c index 67d1f5c819ec..18d9b77ba40f 100644 --- a/fs/afs/super.c 
+++ b/fs/afs/super.c @@ -35,7 +35,7 @@ struct afs_mount_params { struct afs_volume *volume; }; -static void afs_i_init_once(void *foo, kmem_cache_t *cachep, +static void afs_i_init_once(void *foo, struct kmem_cache *cachep, unsigned long flags); static int afs_get_sb(struct file_system_type *fs_type, @@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = { .put_super = afs_put_super, }; -static kmem_cache_t *afs_inode_cachep; +static struct kmem_cache *afs_inode_cachep; static atomic_t afs_count_active_inodes; /*****************************************************************************/ @@ -242,14 +242,12 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent) kenter(""); /* allocate a superblock info record */ - as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL); + as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); if (!as) { _leave(" = -ENOMEM"); return -ENOMEM; } - memset(as, 0, sizeof(struct afs_super_info)); - afs_get_volume(params->volume); as->volume = params->volume; @@ -384,7 +382,7 @@ static void afs_put_super(struct super_block *sb) /* * initialise an inode cache slab element prior to any use */ -static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, +static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, unsigned long flags) { struct afs_vnode *vnode = (struct afs_vnode *) _vnode; @@ -412,7 +410,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb) struct afs_vnode *vnode; vnode = (struct afs_vnode *) - kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL); + kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL); if (!vnode) return NULL; @@ -47,19 +47,19 @@ unsigned long aio_nr; /* current system wide number of aio requests */ unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ /*----end sysctl variables---*/ -static kmem_cache_t *kiocb_cachep; -static kmem_cache_t *kioctx_cachep; +static struct kmem_cache *kiocb_cachep; +static struct kmem_cache *kioctx_cachep; static struct workqueue_struct *aio_wq; /* Used for rare fput completion. 
*/ -static void aio_fput_routine(void *); -static DECLARE_WORK(fput_work, aio_fput_routine, NULL); +static void aio_fput_routine(struct work_struct *); +static DECLARE_WORK(fput_work, aio_fput_routine); static DEFINE_SPINLOCK(fput_lock); static LIST_HEAD(fput_head); -static void aio_kick_handler(void *); +static void aio_kick_handler(struct work_struct *); static void aio_queue_work(struct kioctx *); /* aio_setup @@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) INIT_LIST_HEAD(&ctx->active_reqs); INIT_LIST_HEAD(&ctx->run_list); - INIT_WORK(&ctx->wq, aio_kick_handler, ctx); + INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler); if (aio_setup_ring(ctx) < 0) goto out_freectx; @@ -367,8 +367,7 @@ void fastcall __put_ioctx(struct kioctx *ctx) { unsigned nr_events = ctx->max_reqs; - if (unlikely(ctx->reqs_active)) - BUG(); + BUG_ON(ctx->reqs_active); cancel_delayed_work(&ctx->wq); flush_workqueue(aio_wq); @@ -470,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) wake_up(&ctx->wait); } -static void aio_fput_routine(void *data) +static void aio_fput_routine(struct work_struct *data) { spin_lock_irq(&fput_lock); while (likely(!list_empty(&fput_head))) { @@ -505,8 +504,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) assert_spin_locked(&ctx->ctx_lock); req->ki_users --; - if (unlikely(req->ki_users < 0)) - BUG(); + BUG_ON(req->ki_users < 0); if (likely(req->ki_users)) return 0; list_del(&req->ki_list); /* remove from active_reqs */ @@ -668,17 +666,6 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) ssize_t (*retry)(struct kiocb *); ssize_t ret; - if (iocb->ki_retried++ > 1024*1024) { - printk("Maximal retry count. Bytes done %Zd\n", - iocb->ki_nbytes - iocb->ki_left); - return -EAGAIN; - } - - if (!(iocb->ki_retried & 0xff)) { - pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried, - iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); - } - if (!(retry = iocb->ki_retry)) { printk("aio_run_iocb: iocb->ki_retry = NULL\n"); return 0; @@ -859,9 +846,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx) * space. * Run on aiod's context. */ -static void aio_kick_handler(void *data) +static void aio_kick_handler(struct work_struct *work) { - struct kioctx *ctx = data; + struct kioctx *ctx = container_of(work, struct kioctx, wq.work); mm_segment_t oldfs = get_fs(); int requeue; @@ -876,7 +863,7 @@ static void aio_kick_handler(void *data) * we're in a worker thread already, don't use queue_delayed_work, */ if (requeue) - queue_work(aio_wq, &ctx->wq); + queue_delayed_work(aio_wq, &ctx->wq, 0); } @@ -1007,9 +994,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) kunmap_atomic(ring, KM_IRQ1); pr_debug("added to ring %p at [%lu]\n", iocb, tail); - - pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried, - iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); put_rq: /* everything turned out well, dispose of the aiocb. 
*/ ret = __aio_put_req(ctx, iocb); @@ -1415,7 +1399,6 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb) kiocb->ki_iovec->iov_len = kiocb->ki_left; kiocb->ki_nr_segs = 1; kiocb->ki_cur_seg = 0; - kiocb->ki_nbytes = kiocb->ki_left; return 0; } @@ -1593,7 +1576,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, req->ki_opcode = iocb->aio_lio_opcode; init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); INIT_LIST_HEAD(&req->ki_wait.task_list); - req->ki_retried = 0; ret = aio_setup_iocb(req); diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index 54c518c89e4c..f968d1342808 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -25,6 +25,15 @@ void autofs_kill_sb(struct super_block *sb) struct autofs_sb_info *sbi = autofs_sbi(sb); unsigned int n; + /* + * In the event of a failure in get_sb_nodev the superblock + * info is not present so nothing else has been setup, so + * just call kill_anon_super when we are called from + * deactivate_super. + */ + if (!sbi) + goto out_kill_sb; + if ( !sbi->catatonic ) autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */ @@ -36,6 +45,7 @@ void autofs_kill_sb(struct super_block *sb) kfree(sb->s_fs_info); +out_kill_sb: DPRINTK(("autofs: shutting down\n")); kill_anon_super(sb); } @@ -136,7 +146,8 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) s->s_fs_info = sbi; sbi->magic = AUTOFS_SBI_MAGIC; - sbi->catatonic = 0; + sbi->pipe = NULL; + sbi->catatonic = 1; sbi->exp_timeout = 0; sbi->oz_pgrp = process_group(current); autofs_initialize_hash(&sbi->dirhash); @@ -180,6 +191,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) if ( !pipe->f_op || !pipe->f_op->write ) goto fail_fput; sbi->pipe = pipe; + sbi->catatonic = 0; /* * Success! Install the root dentry now to indicate completion. @@ -198,6 +210,7 @@ fail_iput: iput(root_inode); fail_free: kfree(sbi); + s->s_fs_info = NULL; fail_unlock: return -EINVAL; } diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index 633f628005b4..19a9cafb5ddf 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -41,6 +41,7 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi) wq = nwq; } fput(sbi->pipe); /* Close the pipe */ + sbi->pipe = NULL; autofs_hash_dputall(&sbi->dirhash); /* Remove all dentry pointers */ } diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 51fd8595bf85..9c48250fd726 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -99,6 +99,9 @@ static void autofs4_force_release(struct autofs_sb_info *sbi) struct dentry *this_parent = sbi->sb->s_root; struct list_head *next; + if (!sbi->sb->s_root) + return; + spin_lock(&dcache_lock); repeat: next = this_parent->d_subdirs.next; @@ -146,6 +149,15 @@ void autofs4_kill_sb(struct super_block *sb) { struct autofs_sb_info *sbi = autofs4_sbi(sb); + /* + * In the event of a failure in get_sb_nodev the superblock + * info is not present so nothing else has been setup, so + * just call kill_anon_super when we are called from + * deactivate_super. 
+ */ + if (!sbi) + goto out_kill_sb; + sb->s_fs_info = NULL; if ( !sbi->catatonic ) @@ -156,6 +168,7 @@ void autofs4_kill_sb(struct super_block *sb) kfree(sbi); +out_kill_sb: DPRINTK("shutting down"); kill_anon_super(sb); } @@ -310,7 +323,8 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) s->s_fs_info = sbi; sbi->magic = AUTOFS_SBI_MAGIC; sbi->pipefd = -1; - sbi->catatonic = 0; + sbi->pipe = NULL; + sbi->catatonic = 1; sbi->exp_timeout = 0; sbi->oz_pgrp = process_group(current); sbi->sb = s; @@ -388,6 +402,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_fput; sbi->pipe = pipe; sbi->pipefd = pipefd; + sbi->catatonic = 0; /* * Success! Install the root dentry now to indicate completion. @@ -412,6 +427,7 @@ fail_ino: kfree(ino); fail_free: kfree(sbi); + s->s_fs_info = NULL; fail_unlock: return -EINVAL; } diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index c0a6c8d445c7..1e4a539f4417 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -41,10 +41,8 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi) wake_up_interruptible(&wq->queue); wq = nwq; } - if (sbi->pipe) { - fput(sbi->pipe); /* Close the pipe */ - sbi->pipe = NULL; - } + fput(sbi->pipe); /* Close the pipe */ + sbi->pipe = NULL; } static int autofs4_write(struct file *file, const void *addr, int bytes) diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 07f7144f0e2e..bce402eee554 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -61,7 +61,7 @@ static const struct super_operations befs_sops = { }; /* slab cache for befs_inode_info objects */ -static kmem_cache_t *befs_inode_cachep; +static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, @@ -277,7 +277,7 @@ befs_alloc_inode(struct super_block *sb) { struct befs_inode_info *bi; bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep, - SLAB_KERNEL); + GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; @@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode) kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct befs_inode_info *bi = (struct befs_inode_info *) foo; diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index ed27ffb3459e..eac175ed9f44 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -228,12 +228,12 @@ static void bfs_write_super(struct super_block *s) unlock_kernel(); } -static kmem_cache_t * bfs_inode_cachep; +static struct kmem_cache * bfs_inode_cachep; static struct inode *bfs_alloc_inode(struct super_block *sb) { struct bfs_inode_info *bi; - bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL); + bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; @@ -244,7 +244,7 @@ static void bfs_destroy_inode(struct inode *inode) kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct bfs_inode_info *bi = foo; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 79b05a1a4365..be5869d34999 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -47,10 +47,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); static int load_elf_library(struct file *); static unsigned long elf_map (struct file *, 
unsigned long, struct elf_phdr *, int, int); -#ifndef elf_addr_t -#define elf_addr_t unsigned long -#endif - /* * If we don't support core dumping, then supply a NULL so we * don't even try. @@ -243,8 +239,9 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, if (interp_aout) { argv = sp + 2; envp = argv + argc + 1; - __put_user((elf_addr_t)(unsigned long)argv, sp++); - __put_user((elf_addr_t)(unsigned long)envp, sp++); + if (__put_user((elf_addr_t)(unsigned long)argv, sp++) || + __put_user((elf_addr_t)(unsigned long)envp, sp++)) + return -EFAULT; } else { argv = sp; envp = argv + argc + 1; @@ -254,7 +251,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, p = current->mm->arg_end = current->mm->arg_start; while (argc-- > 0) { size_t len; - __put_user((elf_addr_t)p, argv++); + if (__put_user((elf_addr_t)p, argv++)) + return -EFAULT; len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) return 0; @@ -265,7 +263,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, current->mm->arg_end = current->mm->env_start = p; while (envc-- > 0) { size_t len; - __put_user((elf_addr_t)p, envp++); + if (__put_user((elf_addr_t)p, envp++)) + return -EFAULT; len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES); if (!len || len > PAGE_SIZE*MAX_ARG_PAGES) return 0; @@ -545,7 +544,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) unsigned long reloc_func_desc = 0; char passed_fileno[6]; struct files_struct *files; - int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT; + int executable_stack = EXSTACK_DEFAULT; unsigned long def_flags = 0; struct { struct elfhdr elf_ex; @@ -708,7 +707,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) executable_stack = EXSTACK_DISABLE_X; break; } - have_pt_gnu_stack = (i < loc->elf_ex.e_phnum); /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { @@ -856,7 +854,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) * default mmap base, as well as whatever program they * might try to exec. This is because the brk will * follow the loader, and is not movable. */ - load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + if (current->flags & PF_RANDOMIZE) + load_bias = randomize_range(0x10000, + ELF_ET_DYN_BASE, + 0); + else + load_bias = ELF_ET_DYN_BASE; + load_bias = ELF_PAGESTART(load_bias - vaddr); } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, @@ -1582,6 +1586,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file) sz += thread_status_size; +#ifdef ELF_CORE_WRITE_EXTRA_NOTES + sz += ELF_CORE_EXTRA_NOTES_SIZE; +#endif + fill_elf_note_phdr(&phdr, sz, offset); offset += sz; DUMP_WRITE(&phdr, sizeof(phdr)); @@ -1622,6 +1630,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file) if (!writenote(notes + i, file, &foffset)) goto end_coredump; +#ifdef ELF_CORE_WRITE_EXTRA_NOTES + ELF_CORE_WRITE_EXTRA_NOTES; +#endif + /* write out the thread status notes section */ list_for_each(t, &thread_list) { struct elf_thread_status *tmp = diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index f86d5c9ce5eb..ed9a61c6beb3 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -40,9 +40,6 @@ #include <asm/pgalloc.h> typedef char *elf_caddr_t; -#ifndef elf_addr_t -#define elf_addr_t unsigned long -#endif #if 0 #define kdebug(fmt, ...) 
printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) @@ -30,7 +30,7 @@ #define BIO_POOL_SIZE 256 -static kmem_cache_t *bio_slab __read_mostly; +static struct kmem_cache *bio_slab __read_mostly; #define BIOVEC_NR_POOLS 6 @@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly; struct biovec_slab { int nr_vecs; char *name; - kmem_cache_t *slab; + struct kmem_cache *slab; }; /* @@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, break; } - if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { - ret = -EINVAL; + if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) break; - } len -= bytes; } @@ -622,10 +620,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q, nr_pages += end - start; /* - * transfer and buffer must be aligned to at least hardsector - * size for now, in the future we can relax this restriction + * buffer must be aligned to at least hardsector size for now */ - if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q))) + if (uaddr & queue_dma_alignment(q)) return ERR_PTR(-EINVAL); } @@ -751,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, int write_to_vm) { struct bio *bio; - int len = 0, i; bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); @@ -766,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, */ bio_get(bio); - for (i = 0; i < iov_count; i++) - len += iov[i].iov_len; - - if (bio->bi_size == len) - return bio; - - /* - * don't support partial mappings - */ - bio_endio(bio, bio->bi_size, 0); - bio_unmap_user(bio); - return ERR_PTR(-EINVAL); + return bio; } static void __bio_unmap_user(struct bio *bio) @@ -955,16 +940,16 @@ static void bio_release_pages(struct bio *bio) * run one bio_put() against the BIO. 
*/ -static void bio_dirty_fn(void *data); +static void bio_dirty_fn(struct work_struct *work); -static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); +static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); static DEFINE_SPINLOCK(bio_dirty_lock); static struct bio *bio_dirty_list; /* * This runs in process context */ -static void bio_dirty_fn(void *data) +static void bio_dirty_fn(struct work_struct *work) { unsigned long flags; struct bio *bio; diff --git a/fs/block_dev.c b/fs/block_dev.c index 36c0e7af9d0f..13816b4d76f6 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -235,11 +235,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); -static kmem_cache_t * bdev_cachep __read_mostly; +static struct kmem_cache * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { - struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL); + struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -253,7 +253,7 @@ static void bdev_destroy_inode(struct inode *inode) kmem_cache_free(bdev_cachep, bdi); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; diff --git a/fs/buffer.c b/fs/buffer.c index 35527dca1dbc..517860f2d75b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2908,7 +2908,7 @@ asmlinkage long sys_bdflush(int func, long data) /* * Buffer-head allocation */ -static kmem_cache_t *bh_cachep; +static struct kmem_cache *bh_cachep; /* * Once the number of bh's in the machine exceeds this level, we start @@ -2961,7 +2961,7 @@ void free_buffer_head(struct buffer_head *bh) EXPORT_SYMBOL(free_buffer_head); static void -init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) +init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) { if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { @@ -2972,7 +2972,6 @@ init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) } } -#ifdef CONFIG_HOTPLUG_CPU static void buffer_exit_cpu(int cpu) { int i; @@ -2994,7 +2993,6 @@ static int buffer_cpu_notify(struct notifier_block *self, buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } -#endif /* CONFIG_HOTPLUG_CPU */ void __init buffer_init(void) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 84976cdbe713..71bc87a37fc1 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -34,6 +34,7 @@ #include <linux/mempool.h> #include <linux/delay.h> #include <linux/kthread.h> +#include <linux/freezer.h> #include "cifsfs.h" #include "cifspdu.h" #define DECLARE_GLOBALS_HERE @@ -81,7 +82,7 @@ extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; -extern kmem_cache_t *cifs_oplock_cachep; +extern struct kmem_cache *cifs_oplock_cachep; static int cifs_read_super(struct super_block *sb, void *data, @@ -232,11 +233,11 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd) return generic_permission(inode, mask, NULL); } -static kmem_cache_t *cifs_inode_cachep; -static kmem_cache_t *cifs_req_cachep; -static kmem_cache_t *cifs_mid_cachep; -kmem_cache_t *cifs_oplock_cachep; -static kmem_cache_t *cifs_sm_req_cachep; +static struct kmem_cache *cifs_inode_cachep; +static struct 
kmem_cache *cifs_req_cachep; +static struct kmem_cache *cifs_mid_cachep; +struct kmem_cache *cifs_oplock_cachep; +static struct kmem_cache *cifs_sm_req_cachep; mempool_t *cifs_sm_req_poolp; mempool_t *cifs_req_poolp; mempool_t *cifs_mid_poolp; @@ -245,7 +246,7 @@ static struct inode * cifs_alloc_inode(struct super_block *sb) { struct cifsInodeInfo *cifs_inode; - cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL); + cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL); if (!cifs_inode) return NULL; cifs_inode->cifsAttrs = 0x20; /* default */ @@ -668,7 +669,7 @@ const struct file_operations cifs_dir_ops = { }; static void -cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags) +cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags) { struct cifsInodeInfo *cifsi = inode; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 71f77914ce93..2caca06b4bae 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/completion.h> #include <linux/pagevec.h> +#include <linux/freezer.h> #include <asm/uaccess.h> #include <asm/processor.h> #include "cifspdu.h" diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 7e056b9b49e8..2436ed8fc840 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -492,10 +492,14 @@ int cifs_close(struct inode *inode, struct file *file) the struct would be in each open file, but this should give enough time to clear the socket */ - cERROR(1,("close with pending writes")); +#ifdef CONFIG_CIFS_DEBUG2 + cFYI(1,("close delay, write pending")); +#endif /* DEBUG2 */ msleep(timeout); timeout *= 4; - } + } + if(atomic_read(&pSMBFile->wrtPending)) + cERROR(1,("close with pending writes")); rc = CIFSSMBClose(xid, pTcon, pSMBFile->netfid); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index dffe295825f4..c4fa91b8b62f 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -318,6 +318,7 @@ int cifs_get_inode_info(struct inode **pinode, struct cifs_sb_info *cifs_sb = CIFS_SB(sb); char *tmp_path; char *buf = NULL; + int adjustTZ = FALSE; pTcon = cifs_sb->tcon; cFYI(1,("Getting info on %s", search_path)); @@ -348,6 +349,7 @@ int cifs_get_inode_info(struct inode **pinode, pfindData, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + adjustTZ = TRUE; } } @@ -444,6 +446,10 @@ int cifs_get_inode_info(struct inode **pinode, inode->i_ctime = cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime)); cFYI(0, ("Attributes came in as 0x%x", attr)); + if(adjustTZ && (pTcon->ses) && (pTcon->ses->server)) { + inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj; + inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj; + } /* set default mode. 
will override for dirs below */ if (atomic_read(&cifsInfo->inUse) == 0) @@ -1089,8 +1095,10 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { int err = cifs_revalidate(dentry); - if (!err) + if (!err) { generic_fillattr(dentry->d_inode, stat); + stat->blksize = CIFS_MAX_MSGSIZE; + } return err; } diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 0bee8b7e521a..8e259969354b 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -69,17 +69,30 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, rc = -EOPNOTSUPP; } -/* if (!rc) */ - { - /* renew_parental_timestamps(old_file); - inode->i_nlink++; - mark_inode_dirty(inode); - d_instantiate(direntry, inode); */ - /* BB add call to either mark inode dirty or refresh its data and timestamp to current time */ + d_drop(direntry); /* force new lookup from server of target */ + + /* if source file is cached (oplocked) revalidate will not go to server + until the file is closed or oplock broken so update nlinks locally */ + if(old_file->d_inode) { + cifsInode = CIFS_I(old_file->d_inode); + if(rc == 0) { + old_file->d_inode->i_nlink++; + old_file->d_inode->i_ctime = CURRENT_TIME; + /* parent dir timestamps will update from srv + within a second, would it really be worth it + to set the parent dir cifs inode time to zero + to force revalidate (faster) for it too? */ + } + /* if not oplocked will force revalidate to get info + on source file from srv */ + cifsInode->time = 0; + + /* Will update parent dir timestamps from srv within a second. + Would it really be worth it to set the parent dir (cifs + inode) time field to zero to force revalidate on parent + directory faster ie + CIFS_I(inode)->time = 0; */ } - d_drop(direntry); /* force new lookup from server */ - cifsInode = CIFS_I(old_file->d_inode); - cifsInode->time = 0; /* will force revalidate to go get info when needed */ cifs_hl_exit: kfree(fromName); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index bbc9cd34b6ea..aedf683f011f 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -153,7 +153,7 @@ cifs_buf_get(void) albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = - (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS); + (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS); /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ @@ -192,7 +192,7 @@ cifs_small_buf_get(void) albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = - (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS); + (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS); if (ret_buf) { /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index a8a083543ba0..bbdda99dce61 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -90,7 +90,9 @@ static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses, } */ /* copy user */ if(ses->userName == NULL) { - /* BB what about null user mounts - check that we do this BB */ + /* null user mount */ + *bcc_ptr = 0; + *(bcc_ptr+1) = 0; } else { /* 300 should be long enough for any conceivable user name */ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, 300, nls_cp); @@ -98,10 +100,13 @@ static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses, bcc_ptr += 2 * 
bytes_ret; bcc_ptr += 2; /* account for null termination */ /* copy domain */ - if(ses->domainName == NULL) - bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, - "CIFS_LINUX_DOM", 32, nls_cp); - else + if(ses->domainName == NULL) { + /* Sending null domain better than using a bogus domain name (as + we did briefly in 2.6.18) since server will use its default */ + *bcc_ptr = 0; + *(bcc_ptr+1) = 0; + bytes_ret = 0; + } else bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName, 256, nls_cp); bcc_ptr += 2 * bytes_ret; @@ -144,13 +149,11 @@ static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses, /* copy domain */ - if(ses->domainName == NULL) { - strcpy(bcc_ptr, "CIFS_LINUX_DOM"); - bcc_ptr += 14; /* strlen(CIFS_LINUX_DOM) */ - } else { + if(ses->domainName != NULL) { strncpy(bcc_ptr, ses->domainName, 256); bcc_ptr += strnlen(ses->domainName, 256); - } + } /* else we will send a null domain name + so the server will default to its own domain */ *bcc_ptr = 0; bcc_ptr++; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 48d47b46b1fb..f80007eaebf4 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -34,7 +34,7 @@ #include "cifs_debug.h" extern mempool_t *cifs_mid_poolp; -extern kmem_cache_t *cifs_oplock_cachep; +extern struct kmem_cache *cifs_oplock_cachep; static struct mid_q_entry * AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) @@ -51,7 +51,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) } temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp, - SLAB_KERNEL | SLAB_NOFS); + GFP_KERNEL | GFP_NOFS); if (temp == NULL) return temp; else { @@ -118,7 +118,7 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon) return NULL; } temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep, - SLAB_KERNEL); + GFP_KERNEL); if (temp == NULL) return temp; else { diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 88d123321164..b64659fa82d0 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -38,12 +38,12 @@ static void coda_clear_inode(struct inode *); static void coda_put_super(struct super_block *); static int coda_statfs(struct dentry *dentry, struct kstatfs *buf); -static kmem_cache_t * coda_inode_cachep; +static struct kmem_cache * coda_inode_cachep; static struct inode *coda_alloc_inode(struct super_block *sb) { struct coda_inode_info *ei; - ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL); + ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->c_fid, 0, sizeof(struct CodaFid)); @@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode) kmem_cache_free(coda_inode_cachep, ITOC(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct coda_inode_info *ei = (struct coda_inode_info *) foo; diff --git a/fs/compat.c b/fs/compat.c index 8d0a0018a7d2..a7e3f162fb15 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -45,6 +45,8 @@ #include <linux/personality.h> #include <linux/rwsem.h> #include <linux/tsacct_kern.h> +#include <linux/highmem.h> +#include <linux/poll.h> #include <linux/mm.h> #include <net/sock.h> /* siocdevprivate_ioctl */ @@ -869,7 +871,7 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name, retval = -EINVAL; - if (type_page) { + if (type_page && data_page) { if (!strcmp((char *)type_page, SMBFS_NAME)) 
{ do_smb_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NCPFS_NAME)) { @@ -1142,7 +1144,9 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, lastdirent = buf.previous; if (lastdirent) { typeof(lastdirent->d_off) d_off = file->f_pos; - __put_user_unaligned(d_off, &lastdirent->d_off); + error = -EFAULT; + if (__put_user_unaligned(d_off, &lastdirent->d_off)) + goto out_putf; error = count - buf.count; } @@ -1609,14 +1613,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, nr &= ~1UL; while (nr) { unsigned long h, l; - __get_user(l, ufdset); - __get_user(h, ufdset+1); + if (__get_user(l, ufdset) || __get_user(h, ufdset+1)) + return -EFAULT; ufdset += 2; *fdset++ = h << 32 | l; nr -= 2; } - if (odd) - __get_user(*fdset, ufdset); + if (odd && __get_user(*fdset, ufdset)) + return -EFAULT; } else { /* Tricky, must clear full unsigned long in the * kernel fdset at the end, this makes sure that @@ -1628,14 +1632,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, } static -void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, - unsigned long *fdset) +int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, + unsigned long *fdset) { unsigned long odd; nr = ROUND_UP(nr, __COMPAT_NFDBITS); if (!ufdset) - return; + return 0; odd = nr & 1UL; nr &= ~1UL; @@ -1643,13 +1647,14 @@ void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long h, l; l = *fdset++; h = l >> 32; - __put_user(l, ufdset); - __put_user(h, ufdset+1); + if (__put_user(l, ufdset) || __put_user(h, ufdset+1)) + return -EFAULT; ufdset += 2; nr -= 2; } - if (odd) - __put_user(*fdset, ufdset); + if (odd && __put_user(*fdset, ufdset)) + return -EFAULT; + return 0; } @@ -1724,10 +1729,10 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, ret = 0; } - compat_set_fd_set(n, inp, fds.res_in); - compat_set_fd_set(n, outp, fds.res_out); - compat_set_fd_set(n, exp, fds.res_ex); - + if (compat_set_fd_set(n, inp, fds.res_in) || + compat_set_fd_set(n, outp, fds.res_out) || + compat_set_fd_set(n, exp, fds.res_ex)) + ret = -EFAULT; out: kfree(bits); out_nofds: diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a91f2628c981..bcc3caf5d820 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -211,8 +211,10 @@ static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned lon up_native = compat_alloc_user_space(sizeof(struct video_still_picture)); - put_user(compat_ptr(fp), &up_native->iFrame); - put_user(size, &up_native->size); + err = put_user(compat_ptr(fp), &up_native->iFrame); + err |= put_user(size, &up_native->size); + if (err) + return -EFAULT; err = sys_ioctl(fd, cmd, (unsigned long) up_native); @@ -236,8 +238,10 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned err |= get_user(length, &up->length); up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); - put_user(compat_ptr(palp), &up_native->palette); - put_user(length, &up_native->length); + err = put_user(compat_ptr(palp), &up_native->palette); + err |= put_user(length, &up_native->length); + if (err) + return -EFAULT; err = sys_ioctl(fd, cmd, (unsigned long) up_native); @@ -2043,16 +2047,19 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg) struct serial_struct ss; mm_segment_t oldseg = get_fs(); __u32 udata; + unsigned int base; if (cmd == TIOCSSERIAL) { if (!access_ok(VERIFY_READ, ss32, sizeof(SS32))) return -EFAULT; if 
(__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base))) return -EFAULT; - __get_user(udata, &ss32->iomem_base); + if (__get_user(udata, &ss32->iomem_base)) + return -EFAULT; ss.iomem_base = compat_ptr(udata); - __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift); - __get_user(ss.port_high, &ss32->port_high); + if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || + __get_user(ss.port_high, &ss32->port_high)) + return -EFAULT; ss.iomap_base = 0UL; } set_fs(KERNEL_DS); @@ -2063,12 +2070,12 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg) return -EFAULT; if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base))) return -EFAULT; - __put_user((unsigned long)ss.iomem_base >> 32 ? - 0xffffffff : (unsigned)(unsigned long)ss.iomem_base, - &ss32->iomem_base); - __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift); - __put_user(ss.port_high, &ss32->port_high); - + base = (unsigned long)ss.iomem_base >> 32 ? + 0xffffffff : (unsigned)(unsigned long)ss.iomem_base; + if (__put_user(base, &ss32->iomem_base) || + __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || + __put_user(ss.port_high, &ss32->port_high)) + return -EFAULT; } return err; } @@ -2397,6 +2404,7 @@ HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc) HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc) HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc) HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc) +HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc) /* ioctls used by appletalk ddp.c */ HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc) diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 3f4ff7a242b9..f92cd303d2c9 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -49,7 +49,7 @@ struct configfs_dirent { #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) extern struct vfsmount * configfs_mount; -extern kmem_cache_t *configfs_dir_cachep; +extern struct kmem_cache *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 8a3b6a1a6ad1..c398861f78a5 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -93,8 +93,8 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare * * called with parent inode's i_mutex held */ -int configfs_dirent_exists(struct configfs_dirent *parent_sd, - const unsigned char *new) +static int configfs_dirent_exists(struct configfs_dirent *parent_sd, + const unsigned char *new) { struct configfs_dirent * sd; @@ -1176,8 +1176,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) return; } - mutex_lock(&configfs_sb->s_root->d_inode->i_mutex); - mutex_lock(&dentry->d_inode->i_mutex); + mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex, + I_MUTEX_PARENT); + mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); if (configfs_detach_prep(dentry)) { printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n"); } diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index 68bd5c93ca52..ed678529ebb2 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -38,7 +38,7 @@ struct vfsmount * configfs_mount = NULL; struct super_block * configfs_sb = NULL; -kmem_cache_t *configfs_dir_cachep; +struct kmem_cache *configfs_dir_cachep; static int configfs_mnt_count = 0; static struct super_operations configfs_ops = { diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index a624c3ec8189..0509cedd415c 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -481,6 +481,8 @@ static int cramfs_readpage(struct file *file, struct page * page) 
pgdata = kmap(page); if (compr_len == 0) ; /* hole */ + else if (compr_len > (PAGE_CACHE_SIZE << 1)) + printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len); else { mutex_lock(&read_mutex); bytes_filled = cramfs_uncompress_block(pgdata, diff --git a/fs/dcache.c b/fs/dcache.c index fd4a428998ef..d68631f18df1 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -43,7 +43,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(dcache_lock); -static kmem_cache_t *dentry_cache __read_mostly; +static struct kmem_cache *dentry_cache __read_mostly; #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) @@ -68,15 +68,19 @@ struct dentry_stat_t dentry_stat = { .age_limit = 45, }; -static void d_callback(struct rcu_head *head) +static void __d_free(struct dentry *dentry) { - struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); - if (dname_external(dentry)) kfree(dentry->d_name.name); kmem_cache_free(dentry_cache, dentry); } +static void d_callback(struct rcu_head *head) +{ + struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); + __d_free(dentry); +} + /* * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry * inside dcache_lock. @@ -85,7 +89,11 @@ static void d_free(struct dentry *dentry) { if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); - call_rcu(&dentry->d_u.d_rcu, d_callback); + /* if dentry was never inserted into hash, immediate free is OK */ + if (dentry->d_hash.pprev == NULL) + __d_free(dentry); + else + call_rcu(&dentry->d_u.d_rcu, d_callback); } /* @@ -2072,10 +2080,10 @@ static void __init dcache_init(unsigned long mempages) } /* SLAB cache for __getname() consumers */ -kmem_cache_t *names_cachep __read_mostly; +struct kmem_cache *names_cachep __read_mostly; /* SLAB cache for file structures */ -kmem_cache_t *filp_cachep __read_mostly; +struct kmem_cache *filp_cachep __read_mostly; EXPORT_SYMBOL(d_genocide); diff --git a/fs/dcookies.c b/fs/dcookies.c index 0c4b0674854b..21af1629f9bc 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -37,7 +37,7 @@ struct dcookie_struct { static LIST_HEAD(dcookie_users); static DEFINE_MUTEX(dcookie_mutex); -static kmem_cache_t *dcookie_cache __read_mostly; +static struct kmem_cache *dcookie_cache __read_mostly; static struct list_head *dcookie_hashtable __read_mostly; static size_t hash_size __read_mostly; diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index e77676df6713..137d76c3f90a 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -21,6 +21,7 @@ #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> +#include <linux/kobject.h> #include <linux/namei.h> #include <linux/debugfs.h> @@ -147,13 +148,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode, *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) { + if (!IS_ERR(*dentry)) { if ((mode & S_IFMT) == S_IFDIR) error = debugfs_mkdir(parent->d_inode, *dentry, mode); else error = debugfs_create(parent->d_inode, *dentry, mode); } else - error = PTR_ERR(dentry); + error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig index 81b2c6465eeb..b5654a284fef 100644 --- a/fs/dlm/Kconfig +++ b/fs/dlm/Kconfig @@ -1,14 +1,32 @@ menu "Distributed Lock Manager" - depends on INET && IP_SCTP && EXPERIMENTAL + depends on EXPERIMENTAL && INET config DLM tristate "Distributed Lock Manager 
(DLM)" depends on IPV6 || IPV6=n select CONFIGFS_FS + select IP_SCTP if DLM_SCTP help A general purpose distributed lock manager for kernel or userspace applications. +choice + prompt "Select DLM communications protocol" + depends on DLM + default DLM_TCP + help + The DLM Can use TCP or SCTP for it's network communications. + SCTP supports multi-homed operations whereas TCP doesn't. + However, SCTP seems to have stability problems at the moment. + +config DLM_TCP + bool "TCP/IP" + +config DLM_SCTP + bool "SCTP" + +endchoice + config DLM_DEBUG bool "DLM debugging" depends on DLM diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile index 1832e0297f7d..65388944eba0 100644 --- a/fs/dlm/Makefile +++ b/fs/dlm/Makefile @@ -4,7 +4,6 @@ dlm-y := ast.o \ dir.o \ lock.o \ lockspace.o \ - lowcomms.o \ main.o \ member.o \ memory.o \ @@ -17,3 +16,6 @@ dlm-y := ast.o \ util.o dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o +dlm-$(CONFIG_DLM_TCP) += lowcomms-tcp.o + +dlm-$(CONFIG_DLM_SCTP) += lowcomms-sctp.o
\ No newline at end of file diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 1e5cd67e1b7a..1ee8195e6fc0 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -471,6 +471,7 @@ struct dlm_ls { char *ls_recover_buf; int ls_recover_nodeid; /* for debugging */ uint64_t ls_rcom_seq; + spinlock_t ls_rcom_spin; struct list_head ls_recover_list; spinlock_t ls_recover_list_lock; int ls_recover_list_count; @@ -488,7 +489,8 @@ struct dlm_ls { #define LSFL_RUNNING 1 #define LSFL_RECOVERY_STOP 2 #define LSFL_RCOM_READY 3 -#define LSFL_UEVENT_WAIT 4 +#define LSFL_RCOM_WAIT 4 +#define LSFL_UEVENT_WAIT 5 /* much of this is just saving user space pointers associated with the lock that we pass back to the user lib with an ast */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 3f2befa4797b..30878defaeb6 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2372,6 +2372,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in, static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms) { lkb->lkb_exflags = ms->m_exflags; + lkb->lkb_sbflags = ms->m_sbflags; lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | (ms->m_flags & 0x0000FFFF); } @@ -3028,10 +3029,17 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery) while (1) { if (dlm_locking_stopped(ls)) { - if (!recovery) - dlm_add_requestqueue(ls, nodeid, hd); - error = -EINTR; - goto out; + if (recovery) { + error = -EINTR; + goto out; + } + error = dlm_add_requestqueue(ls, nodeid, hd); + if (error == -EAGAIN) + continue; + else { + error = -EINTR; + goto out; + } } if (lock_recovery_try(ls)) diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 109333c8ecb9..59012b089e8d 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -22,6 +22,7 @@ #include "memory.h" #include "lock.h" #include "recover.h" +#include "requestqueue.h" #ifdef CONFIG_DLM_DEBUG int dlm_create_debug_file(struct dlm_ls *ls); @@ -43,6 +44,10 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) ssize_t ret = len; int n = simple_strtol(buf, NULL, 0); + ls = dlm_find_lockspace_local(ls->ls_local_handle); + if (!ls) + return -EINVAL; + switch (n) { case 0: dlm_ls_stop(ls); @@ -53,6 +58,7 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) default: ret = -EINVAL; } + dlm_put_lockspace(ls); return ret; } @@ -143,6 +149,12 @@ static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr, return a->store ? 
a->store(ls, buf, len) : len; } +static void lockspace_kobj_release(struct kobject *k) +{ + struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj); + kfree(ls); +} + static struct sysfs_ops dlm_attr_ops = { .show = dlm_attr_show, .store = dlm_attr_store, @@ -151,6 +163,7 @@ static struct sysfs_ops dlm_attr_ops = { static struct kobj_type dlm_ktype = { .default_attrs = dlm_attrs, .sysfs_ops = &dlm_attr_ops, + .release = lockspace_kobj_release, }; static struct kset dlm_kset = { @@ -466,6 +479,8 @@ static int new_lockspace(char *name, int namelen, void **lockspace, ls->ls_recoverd_task = NULL; mutex_init(&ls->ls_recoverd_active); spin_lock_init(&ls->ls_recover_lock); + spin_lock_init(&ls->ls_rcom_spin); + get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); ls->ls_recover_status = 0; ls->ls_recover_seq = 0; ls->ls_recover_args = NULL; @@ -672,13 +687,14 @@ static int release_lockspace(struct dlm_ls *ls, int force) * Free structures on any other lists */ + dlm_purge_requestqueue(ls); kfree(ls->ls_recover_args); dlm_clear_free_entries(ls); dlm_clear_members(ls); dlm_clear_members_gone(ls); kfree(ls->ls_node_array); kobject_unregister(&ls->ls_kobj); - kfree(ls); + /* The ls structure will be freed when the kobject is done with */ mutex_lock(&ls_lock); ls_count--; diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms-sctp.c index 6da6b14d5a61..fe158d7a9285 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms-sctp.c @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -75,13 +75,13 @@ struct nodeinfo { }; static DEFINE_IDR(nodeinfo_idr); -static struct rw_semaphore nodeinfo_lock; -static int max_nodeid; +static DECLARE_RWSEM(nodeinfo_lock); +static int max_nodeid; struct cbuf { - unsigned base; - unsigned len; - unsigned mask; + unsigned int base; + unsigned int len; + unsigned int mask; }; /* Just the one of these, now. 
But this struct keeps @@ -90,9 +90,9 @@ struct cbuf { #define CF_READ_PENDING 1 struct connection { - struct socket *sock; + struct socket *sock; unsigned long flags; - struct page *rx_page; + struct page *rx_page; atomic_t waiting_requests; struct cbuf cb; int eagain_flag; @@ -102,36 +102,40 @@ struct connection { struct writequeue_entry { struct list_head list; - struct page *page; + struct page *page; int offset; int len; int end; int users; - struct nodeinfo *ni; + struct nodeinfo *ni; }; -#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0) -#define CBUF_EMPTY(cb) ((cb)->len == 0) -#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1)) -#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask) +static void cbuf_add(struct cbuf *cb, int n) +{ + cb->len += n; +} -#define CBUF_INIT(cb, size) \ -do { \ - (cb)->base = (cb)->len = 0; \ - (cb)->mask = ((size)-1); \ -} while(0) +static int cbuf_data(struct cbuf *cb) +{ + return ((cb->base + cb->len) & cb->mask); +} -#define CBUF_EAT(cb, n) \ -do { \ - (cb)->len -= (n); \ - (cb)->base += (n); \ - (cb)->base &= (cb)->mask; \ -} while(0) +static void cbuf_init(struct cbuf *cb, int size) +{ + cb->base = cb->len = 0; + cb->mask = size-1; +} +static void cbuf_eat(struct cbuf *cb, int n) +{ + cb->len -= n; + cb->base += n; + cb->base &= cb->mask; +} /* List of nodes which have writes pending */ -static struct list_head write_nodes; -static spinlock_t write_nodes_lock; +static LIST_HEAD(write_nodes); +static DEFINE_SPINLOCK(write_nodes_lock); /* Maximum number of incoming messages to process before * doing a schedule() @@ -141,8 +145,7 @@ static spinlock_t write_nodes_lock; /* Manage daemons */ static struct task_struct *recv_task; static struct task_struct *send_task; -static wait_queue_head_t lowcomms_recv_wait; -static atomic_t accepting; +static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait); /* The SCTP connection */ static struct connection sctp_con; @@ -161,11 +164,11 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) return error; if (dlm_local_addr[0]->ss_family == AF_INET) { - struct sockaddr_in *in4 = (struct sockaddr_in *) &addr; + struct sockaddr_in *in4 = (struct sockaddr_in *) &addr; struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr; ret4->sin_addr.s_addr = in4->sin_addr.s_addr; } else { - struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; memcpy(&ret6->sin6_addr, &in6->sin6_addr, sizeof(in6->sin6_addr)); @@ -174,6 +177,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) return 0; } +/* If alloc is 0 here we will not attempt to allocate a new + nodeinfo struct */ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc) { struct nodeinfo *ni; @@ -184,44 +189,45 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc) ni = idr_find(&nodeinfo_idr, nodeid); up_read(&nodeinfo_lock); - if (!ni && alloc) { - down_write(&nodeinfo_lock); + if (ni || !alloc) + return ni; - ni = idr_find(&nodeinfo_idr, nodeid); - if (ni) - goto out_up; + down_write(&nodeinfo_lock); - r = idr_pre_get(&nodeinfo_idr, alloc); - if (!r) - goto out_up; + ni = idr_find(&nodeinfo_idr, nodeid); + if (ni) + goto out_up; - ni = kmalloc(sizeof(struct nodeinfo), alloc); - if (!ni) - goto out_up; + r = idr_pre_get(&nodeinfo_idr, alloc); + if (!r) + goto out_up; - r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n); - if (r) { - kfree(ni); - ni = NULL; - goto out_up; - } - if 
(n != nodeid) { - idr_remove(&nodeinfo_idr, n); - kfree(ni); - ni = NULL; - goto out_up; - } - memset(ni, 0, sizeof(struct nodeinfo)); - spin_lock_init(&ni->lock); - INIT_LIST_HEAD(&ni->writequeue); - spin_lock_init(&ni->writequeue_lock); - ni->nodeid = nodeid; - - if (nodeid > max_nodeid) - max_nodeid = nodeid; - out_up: - up_write(&nodeinfo_lock); + ni = kmalloc(sizeof(struct nodeinfo), alloc); + if (!ni) + goto out_up; + + r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n); + if (r) { + kfree(ni); + ni = NULL; + goto out_up; } + if (n != nodeid) { + idr_remove(&nodeinfo_idr, n); + kfree(ni); + ni = NULL; + goto out_up; + } + memset(ni, 0, sizeof(struct nodeinfo)); + spin_lock_init(&ni->lock); + INIT_LIST_HEAD(&ni->writequeue); + spin_lock_init(&ni->writequeue_lock); + ni->nodeid = nodeid; + + if (nodeid > max_nodeid) + max_nodeid = nodeid; +out_up: + up_write(&nodeinfo_lock); return ni; } @@ -279,13 +285,13 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port, in4_addr->sin_port = cpu_to_be16(port); memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) - - sizeof(struct sockaddr_in)); + sizeof(struct sockaddr_in)); *addr_len = sizeof(struct sockaddr_in); } else { struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; in6_addr->sin6_port = cpu_to_be16(port); memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) - - sizeof(struct sockaddr_in6)); + sizeof(struct sockaddr_in6)); *addr_len = sizeof(struct sockaddr_in6); } } @@ -324,7 +330,7 @@ static void send_shutdown(sctp_assoc_t associd) cmsg->cmsg_type = SCTP_SNDRCV; cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); outmessage.msg_controllen = cmsg->cmsg_len; - sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); + sinfo = CMSG_DATA(cmsg); memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); sinfo->sinfo_flags |= MSG_EOF; @@ -387,7 +393,7 @@ static void process_sctp_notification(struct msghdr *msg, char *buf) if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) { log_print("COMM_UP for invalid assoc ID %d", - (int)sn->sn_assoc_change.sac_assoc_id); + (int)sn->sn_assoc_change.sac_assoc_id); init_failed(); return; } @@ -398,15 +404,18 @@ static void process_sctp_notification(struct msghdr *msg, char *buf) fs = get_fs(); set_fs(get_ds()); ret = sctp_con.sock->ops->getsockopt(sctp_con.sock, - IPPROTO_SCTP, SCTP_PRIMARY_ADDR, - (char*)&prim, &prim_len); + IPPROTO_SCTP, + SCTP_PRIMARY_ADDR, + (char*)&prim, + &prim_len); set_fs(fs); if (ret < 0) { struct nodeinfo *ni; log_print("getsockopt/sctp_primary_addr on " "new assoc %d failed : %d", - (int)sn->sn_assoc_change.sac_assoc_id, ret); + (int)sn->sn_assoc_change.sac_assoc_id, + ret); /* Retry INIT later */ ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id); @@ -426,12 +435,10 @@ static void process_sctp_notification(struct msghdr *msg, char *buf) return; /* Save the assoc ID */ - spin_lock(&ni->lock); ni->assoc_id = sn->sn_assoc_change.sac_assoc_id; - spin_unlock(&ni->lock); log_print("got new/restarted association %d nodeid %d", - (int)sn->sn_assoc_change.sac_assoc_id, nodeid); + (int)sn->sn_assoc_change.sac_assoc_id, nodeid); /* Send any pending writes */ clear_bit(NI_INIT_PENDING, &ni->flags); @@ -507,13 +514,12 @@ static int receive_from_sock(void) sctp_con.rx_page = alloc_page(GFP_ATOMIC); if (sctp_con.rx_page == NULL) goto out_resched; - CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE); + cbuf_init(&sctp_con.cb, PAGE_CACHE_SIZE); } memset(&incmsg, 0, sizeof(incmsg)); memset(&msgname, 0, 
sizeof(msgname)); - memset(incmsg, 0, sizeof(incmsg)); msg.msg_name = &msgname; msg.msg_namelen = sizeof(msgname); msg.msg_flags = 0; @@ -532,17 +538,17 @@ static int receive_from_sock(void) * iov[0] is the bit of the circular buffer between the current end * point (cb.base + cb.len) and the end of the buffer. */ - iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb); + iov[0].iov_len = sctp_con.cb.base - cbuf_data(&sctp_con.cb); iov[0].iov_base = page_address(sctp_con.rx_page) + - CBUF_DATA(&sctp_con.cb); + cbuf_data(&sctp_con.cb); iov[1].iov_len = 0; /* * iov[1] is the bit of the circular buffer between the start of the * buffer and the start of the currently used section (cb.base) */ - if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) { - iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb); + if (cbuf_data(&sctp_con.cb) >= sctp_con.cb.base) { + iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&sctp_con.cb); iov[1].iov_len = sctp_con.cb.base; iov[1].iov_base = page_address(sctp_con.rx_page); msg.msg_iovlen = 2; @@ -557,7 +563,7 @@ static int receive_from_sock(void) msg.msg_control = incmsg; msg.msg_controllen = sizeof(incmsg); cmsg = CMSG_FIRSTHDR(&msg); - sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); + sinfo = CMSG_DATA(cmsg); if (msg.msg_flags & MSG_NOTIFICATION) { process_sctp_notification(&msg, page_address(sctp_con.rx_page)); @@ -583,29 +589,29 @@ static int receive_from_sock(void) if (r == 1) return 0; - CBUF_ADD(&sctp_con.cb, ret); + cbuf_add(&sctp_con.cb, ret); ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid), page_address(sctp_con.rx_page), sctp_con.cb.base, sctp_con.cb.len, PAGE_CACHE_SIZE); if (ret < 0) goto out_close; - CBUF_EAT(&sctp_con.cb, ret); + cbuf_eat(&sctp_con.cb, ret); - out: +out: ret = 0; goto out_ret; - out_resched: +out_resched: lowcomms_data_ready(sctp_con.sock->sk, 0); ret = 0; - schedule(); + cond_resched(); goto out_ret; - out_close: +out_close: if (ret != -EAGAIN) log_print("error reading from sctp socket: %d", ret); - out_ret: +out_ret: return ret; } @@ -619,10 +625,12 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num) set_fs(get_ds()); if (num == 1) result = sctp_con.sock->ops->bind(sctp_con.sock, - (struct sockaddr *) addr, addr_len); + (struct sockaddr *) addr, + addr_len); else result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP, - SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len); + SCTP_SOCKOPT_BINDX_ADD, + (char *)addr, addr_len); set_fs(fs); if (result < 0) @@ -719,10 +727,10 @@ static int init_sock(void) return 0; - create_delsock: +create_delsock: sock_release(sock); sctp_con.sock = NULL; - out: +out: return result; } @@ -756,16 +764,13 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) int users = 0; struct nodeinfo *ni; - if (!atomic_read(&accepting)) - return NULL; - ni = nodeid2nodeinfo(nodeid, allocation); if (!ni) return NULL; spin_lock(&ni->writequeue_lock); e = list_entry(ni->writequeue.prev, struct writequeue_entry, list); - if (((struct list_head *) e == &ni->writequeue) || + if ((&e->list == &ni->writequeue) || (PAGE_CACHE_SIZE - e->end < len)) { e = NULL; } else { @@ -776,7 +781,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) spin_unlock(&ni->writequeue_lock); if (e) { - got_one: + got_one: if (users == 0) kmap(e->page); *ppc = page_address(e->page) + offset; @@ -803,9 +808,6 @@ void dlm_lowcomms_commit_buffer(void *arg) int users; struct nodeinfo *ni = e->ni; - if (!atomic_read(&accepting)) - 
return; - spin_lock(&ni->writequeue_lock); users = --e->users; if (users) @@ -822,7 +824,7 @@ void dlm_lowcomms_commit_buffer(void *arg) } return; - out: +out: spin_unlock(&ni->writequeue_lock); return; } @@ -878,7 +880,7 @@ static void initiate_association(int nodeid) cmsg->cmsg_level = IPPROTO_SCTP; cmsg->cmsg_type = SCTP_SNDRCV; cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); - sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); + sinfo = CMSG_DATA(cmsg); memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); @@ -892,7 +894,7 @@ static void initiate_association(int nodeid) } /* Send a message */ -static int send_to_sock(struct nodeinfo *ni) +static void send_to_sock(struct nodeinfo *ni) { int ret = 0; struct writequeue_entry *e; @@ -903,13 +905,13 @@ static int send_to_sock(struct nodeinfo *ni) struct sctp_sndrcvinfo *sinfo; struct kvec iov; - /* See if we need to init an association before we start + /* See if we need to init an association before we start sending precious messages */ spin_lock(&ni->lock); if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { spin_unlock(&ni->lock); initiate_association(ni->nodeid); - return 0; + return; } spin_unlock(&ni->lock); @@ -923,7 +925,7 @@ static int send_to_sock(struct nodeinfo *ni) cmsg->cmsg_level = IPPROTO_SCTP; cmsg->cmsg_type = SCTP_SNDRCV; cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); - sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); + sinfo = CMSG_DATA(cmsg); memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); sinfo->sinfo_assoc_id = ni->assoc_id; @@ -955,7 +957,7 @@ static int send_to_sock(struct nodeinfo *ni) goto send_error; } else { /* Don't starve people filling buffers */ - schedule(); + cond_resched(); } spin_lock(&ni->writequeue_lock); @@ -964,15 +966,16 @@ static int send_to_sock(struct nodeinfo *ni) if (e->len == 0 && e->users == 0) { list_del(&e->list); + kunmap(e->page); free_entry(e); continue; } } spin_unlock(&ni->writequeue_lock); - out: - return ret; +out: + return; - send_error: +send_error: log_print("Error sending to node %d %d", ni->nodeid, ret); spin_lock(&ni->lock); if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { @@ -982,7 +985,7 @@ static int send_to_sock(struct nodeinfo *ni) } else spin_unlock(&ni->lock); - return ret; + return; } /* Try to send any messages that are pending */ @@ -994,7 +997,7 @@ static void process_output_queue(void) spin_lock_bh(&write_nodes_lock); list_for_each_safe(list, temp, &write_nodes) { struct nodeinfo *ni = - list_entry(list, struct nodeinfo, write_list); + list_entry(list, struct nodeinfo, write_list); clear_bit(NI_WRITE_PENDING, &ni->flags); list_del(&ni->write_list); @@ -1106,7 +1109,7 @@ static int dlm_recvd(void *data) set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&lowcomms_recv_wait, &wait); if (!test_bit(CF_READ_PENDING, &sctp_con.flags)) - schedule(); + cond_resched(); remove_wait_queue(&lowcomms_recv_wait, &wait); set_current_state(TASK_RUNNING); @@ -1118,12 +1121,12 @@ static int dlm_recvd(void *data) /* Don't starve out everyone else */ if (++count >= MAX_RX_MSG_COUNT) { - schedule(); + cond_resched(); count = 0; } } while (!kthread_should_stop() && ret >=0); } - schedule(); + cond_resched(); } return 0; @@ -1138,7 +1141,7 @@ static int dlm_sendd(void *data) while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if (write_list_empty()) - schedule(); + cond_resched(); set_current_state(TASK_RUNNING); 
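For orientation, the buffer interface being adjusted above is used by callers in a reserve/copy/commit sequence: dlm_lowcomms_get_buffer() returns a handle plus a mapped pointer into a per-node write-queue page, and dlm_lowcomms_commit_buffer() hands the filled space to the send daemon. The lowcomms_send_message() helper added later in this commit does exactly this; a minimal hypothetical caller (example_send is an illustrative name, not part of the patch) might look like:

	#include <linux/string.h>
	#include <linux/slab.h>
	#include "lowcomms.h"

	static int example_send(int nodeid, const char *msg, int len)
	{
		void *mh;
		char *p;

		/* reserve len bytes in nodeid's write queue */
		mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_KERNEL, &p);
		if (!mh)
			return -ENOBUFS;

		memcpy(p, msg, len);		/* fill the reserved space */
		dlm_lowcomms_commit_buffer(mh);	/* queue it for the send daemon */
		return 0;
	}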
if (sctp_con.eagain_flag) { @@ -1166,7 +1169,7 @@ static int daemons_start(void) p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); error = IS_ERR(p); - if (error) { + if (error) { log_print("can't start dlm_recvd %d", error); return error; } @@ -1174,7 +1177,7 @@ static int daemons_start(void) p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); error = IS_ERR(p); - if (error) { + if (error) { log_print("can't start dlm_sendd %d", error); kthread_stop(recv_task); return error; @@ -1197,43 +1200,28 @@ int dlm_lowcomms_start(void) error = daemons_start(); if (error) goto fail_sock; - atomic_set(&accepting, 1); return 0; - fail_sock: +fail_sock: close_connection(); return error; } -/* Set all the activity flags to prevent any socket activity. */ - void dlm_lowcomms_stop(void) { - atomic_set(&accepting, 0); + int i; + sctp_con.flags = 0x7; daemons_stop(); clean_writequeues(); close_connection(); dealloc_nodeinfo(); max_nodeid = 0; -} -int dlm_lowcomms_init(void) -{ - init_waitqueue_head(&lowcomms_recv_wait); - spin_lock_init(&write_nodes_lock); - INIT_LIST_HEAD(&write_nodes); - init_rwsem(&nodeinfo_lock); - return 0; -} - -void dlm_lowcomms_exit(void) -{ - int i; + dlm_local_count = 0; + dlm_local_nodeid = 0; for (i = 0; i < dlm_local_count; i++) kfree(dlm_local_addr[i]); - dlm_local_count = 0; - dlm_local_nodeid = 0; } diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c new file mode 100644 index 000000000000..8f2791fc8447 --- /dev/null +++ b/fs/dlm/lowcomms-tcp.c @@ -0,0 +1,1189 @@ +/****************************************************************************** +******************************************************************************* +** +** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. +** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. +** +** This copyrighted material is made available to anyone wishing to use, +** modify, copy, or redistribute it subject to the terms and conditions +** of the GNU General Public License v.2. +** +******************************************************************************* +******************************************************************************/ + +/* + * lowcomms.c + * + * This is the "low-level" comms layer. + * + * It is responsible for sending/receiving messages + * from other nodes in the cluster. + * + * Cluster nodes are referred to by their nodeids. nodeids are + * simply 32 bit numbers to the locking module - if they need to + * be expanded for the cluster infrastructure then that is it's + * responsibility. It is this layer's + * responsibility to resolve these into IP address or + * whatever it needs for inter-node communication. + * + * The comms level is two kernel threads that deal mainly with + * the receiving of messages from other nodes and passing them + * up to the mid-level comms layer (which understands the + * message format) for execution by the locking core, and + * a send thread which does all the setting up of connections + * to remote nodes and the sending of data. Threads are not allowed + * to send their own data because it may cause them to wait in times + * of high load. Also, this way, the sending thread can collect together + * messages bound for one node and send them in one block. + * + * I don't see any problem with the recv thread executing the locking + * code on behalf of remote processes as the locking code is + * short, efficient and never waits. 
+ * + */ + + +#include <asm/ioctls.h> +#include <net/sock.h> +#include <net/tcp.h> +#include <linux/pagemap.h> + +#include "dlm_internal.h" +#include "lowcomms.h" +#include "midcomms.h" +#include "config.h" + +struct cbuf { + unsigned int base; + unsigned int len; + unsigned int mask; +}; + +#define NODE_INCREMENT 32 +static void cbuf_add(struct cbuf *cb, int n) +{ + cb->len += n; +} + +static int cbuf_data(struct cbuf *cb) +{ + return ((cb->base + cb->len) & cb->mask); +} + +static void cbuf_init(struct cbuf *cb, int size) +{ + cb->base = cb->len = 0; + cb->mask = size-1; +} + +static void cbuf_eat(struct cbuf *cb, int n) +{ + cb->len -= n; + cb->base += n; + cb->base &= cb->mask; +} + +static bool cbuf_empty(struct cbuf *cb) +{ + return cb->len == 0; +} + +/* Maximum number of incoming messages to process before + doing a cond_resched() +*/ +#define MAX_RX_MSG_COUNT 25 + +struct connection { + struct socket *sock; /* NULL if not connected */ + uint32_t nodeid; /* So we know who we are in the list */ + struct rw_semaphore sock_sem; /* Stop connect races */ + struct list_head read_list; /* On this list when ready for reading */ + struct list_head write_list; /* On this list when ready for writing */ + struct list_head state_list; /* On this list when ready to connect */ + unsigned long flags; /* bit 1,2 = We are on the read/write lists */ +#define CF_READ_PENDING 1 +#define CF_WRITE_PENDING 2 +#define CF_CONNECT_PENDING 3 +#define CF_IS_OTHERCON 4 + struct list_head writequeue; /* List of outgoing writequeue_entries */ + struct list_head listenlist; /* List of allocated listening sockets */ + spinlock_t writequeue_lock; + int (*rx_action) (struct connection *); /* What to do when active */ + struct page *rx_page; + struct cbuf cb; + int retries; + atomic_t waiting_requests; +#define MAX_CONNECT_RETRIES 3 + struct connection *othercon; +}; +#define sock2con(x) ((struct connection *)(x)->sk_user_data) + +/* An entry waiting to be sent */ +struct writequeue_entry { + struct list_head list; + struct page *page; + int offset; + int len; + int end; + int users; + struct connection *con; +}; + +static struct sockaddr_storage dlm_local_addr; + +/* Manage daemons */ +static struct task_struct *recv_task; +static struct task_struct *send_task; + +static wait_queue_t lowcomms_send_waitq_head; +static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq); +static wait_queue_t lowcomms_recv_waitq_head; +static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq); + +/* An array of pointers to connections, indexed by NODEID */ +static struct connection **connections; +static DECLARE_MUTEX(connections_lock); +static kmem_cache_t *con_cache; +static int conn_array_size; + +/* List of sockets that have reads pending */ +static LIST_HEAD(read_sockets); +static DEFINE_SPINLOCK(read_sockets_lock); + +/* List of sockets which have writes pending */ +static LIST_HEAD(write_sockets); +static DEFINE_SPINLOCK(write_sockets_lock); + +/* List of sockets which have connects pending */ +static LIST_HEAD(state_sockets); +static DEFINE_SPINLOCK(state_sockets_lock); + +static struct connection *nodeid2con(int nodeid, gfp_t allocation) +{ + struct connection *con = NULL; + + down(&connections_lock); + if (nodeid >= conn_array_size) { + int new_size = nodeid + NODE_INCREMENT; + struct connection **new_conns; + + new_conns = kzalloc(sizeof(struct connection *) * + new_size, allocation); + if (!new_conns) + goto finish; + + memcpy(new_conns, connections, sizeof(struct connection *) * conn_array_size); + conn_array_size = new_size; + 
kfree(connections); + connections = new_conns; + + } + + con = connections[nodeid]; + if (con == NULL && allocation) { + con = kmem_cache_zalloc(con_cache, allocation); + if (!con) + goto finish; + + con->nodeid = nodeid; + init_rwsem(&con->sock_sem); + INIT_LIST_HEAD(&con->writequeue); + spin_lock_init(&con->writequeue_lock); + + connections[nodeid] = con; + } + +finish: + up(&connections_lock); + return con; +} + +/* Data available on socket or listen socket received a connect */ +static void lowcomms_data_ready(struct sock *sk, int count_unused) +{ + struct connection *con = sock2con(sk); + + atomic_inc(&con->waiting_requests); + if (test_and_set_bit(CF_READ_PENDING, &con->flags)) + return; + + spin_lock_bh(&read_sockets_lock); + list_add_tail(&con->read_list, &read_sockets); + spin_unlock_bh(&read_sockets_lock); + + wake_up_interruptible(&lowcomms_recv_waitq); +} + +static void lowcomms_write_space(struct sock *sk) +{ + struct connection *con = sock2con(sk); + + if (test_and_set_bit(CF_WRITE_PENDING, &con->flags)) + return; + + spin_lock_bh(&write_sockets_lock); + list_add_tail(&con->write_list, &write_sockets); + spin_unlock_bh(&write_sockets_lock); + + wake_up_interruptible(&lowcomms_send_waitq); +} + +static inline void lowcomms_connect_sock(struct connection *con) +{ + if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags)) + return; + + spin_lock_bh(&state_sockets_lock); + list_add_tail(&con->state_list, &state_sockets); + spin_unlock_bh(&state_sockets_lock); + + wake_up_interruptible(&lowcomms_send_waitq); +} + +static void lowcomms_state_change(struct sock *sk) +{ + if (sk->sk_state == TCP_ESTABLISHED) + lowcomms_write_space(sk); +} + +/* Make a socket active */ +static int add_sock(struct socket *sock, struct connection *con) +{ + con->sock = sock; + + /* Install a data_ready callback */ + con->sock->sk->sk_data_ready = lowcomms_data_ready; + con->sock->sk->sk_write_space = lowcomms_write_space; + con->sock->sk->sk_state_change = lowcomms_state_change; + + return 0; +} + +/* Add the port number to an IP6 or 4 sockaddr and return the address + length */ +static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port, + int *addr_len) +{ + saddr->ss_family = dlm_local_addr.ss_family; + if (saddr->ss_family == AF_INET) { + struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr; + in4_addr->sin_port = cpu_to_be16(port); + *addr_len = sizeof(struct sockaddr_in); + } else { + struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; + in6_addr->sin6_port = cpu_to_be16(port); + *addr_len = sizeof(struct sockaddr_in6); + } +} + +/* Close a remote connection and tidy up */ +static void close_connection(struct connection *con, bool and_other) +{ + down_write(&con->sock_sem); + + if (con->sock) { + sock_release(con->sock); + con->sock = NULL; + } + if (con->othercon && and_other) { + /* Will only re-enter once. */ + close_connection(con->othercon, false); + } + if (con->rx_page) { + __free_page(con->rx_page); + con->rx_page = NULL; + } + con->retries = 0; + up_write(&con->sock_sem); +} + +/* Data received from remote end */ +static int receive_from_sock(struct connection *con) +{ + int ret = 0; + struct msghdr msg; + struct iovec iov[2]; + mm_segment_t fs; + unsigned len; + int r; + int call_again_soon = 0; + + down_read(&con->sock_sem); + + if (con->sock == NULL) + goto out; + if (con->rx_page == NULL) { + /* + * This doesn't need to be atomic, but I think it should + * improve performance if it is. 
+ */ + con->rx_page = alloc_page(GFP_ATOMIC); + if (con->rx_page == NULL) + goto out_resched; + cbuf_init(&con->cb, PAGE_CACHE_SIZE); + } + + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_iovlen = 1; + msg.msg_iov = iov; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_flags = 0; + + /* + * iov[0] is the bit of the circular buffer between the current end + * point (cb.base + cb.len) and the end of the buffer. + */ + iov[0].iov_len = con->cb.base - cbuf_data(&con->cb); + iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb); + iov[1].iov_len = 0; + + /* + * iov[1] is the bit of the circular buffer between the start of the + * buffer and the start of the currently used section (cb.base) + */ + if (cbuf_data(&con->cb) >= con->cb.base) { + iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); + iov[1].iov_len = con->cb.base; + iov[1].iov_base = page_address(con->rx_page); + msg.msg_iovlen = 2; + } + len = iov[0].iov_len + iov[1].iov_len; + + fs = get_fs(); + set_fs(get_ds()); + r = ret = sock_recvmsg(con->sock, &msg, len, + MSG_DONTWAIT | MSG_NOSIGNAL); + set_fs(fs); + + if (ret <= 0) + goto out_close; + if (ret == len) + call_again_soon = 1; + cbuf_add(&con->cb, ret); + ret = dlm_process_incoming_buffer(con->nodeid, + page_address(con->rx_page), + con->cb.base, con->cb.len, + PAGE_CACHE_SIZE); + if (ret == -EBADMSG) { + printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, " + "iov_len=%u, iov_base[0]=%p, read=%d\n", + page_address(con->rx_page), con->cb.base, con->cb.len, + len, iov[0].iov_base, r); + } + if (ret < 0) + goto out_close; + cbuf_eat(&con->cb, ret); + + if (cbuf_empty(&con->cb) && !call_again_soon) { + __free_page(con->rx_page); + con->rx_page = NULL; + } + +out: + if (call_again_soon) + goto out_resched; + up_read(&con->sock_sem); + return 0; + +out_resched: + lowcomms_data_ready(con->sock->sk, 0); + up_read(&con->sock_sem); + cond_resched(); + return 0; + +out_close: + up_read(&con->sock_sem); + if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) { + close_connection(con, false); + /* Reconnect when there is something to send */ + } + + return ret; +} + +/* Listening socket is busy, accept a connection */ +static int accept_from_sock(struct connection *con) +{ + int result; + struct sockaddr_storage peeraddr; + struct socket *newsock; + int len; + int nodeid; + struct connection *newcon; + + memset(&peeraddr, 0, sizeof(peeraddr)); + result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, + IPPROTO_TCP, &newsock); + if (result < 0) + return -ENOMEM; + + down_read(&con->sock_sem); + + result = -ENOTCONN; + if (con->sock == NULL) + goto accept_err; + + newsock->type = con->sock->type; + newsock->ops = con->sock->ops; + + result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK); + if (result < 0) + goto accept_err; + + /* Get the connected socket's peer */ + memset(&peeraddr, 0, sizeof(peeraddr)); + if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, + &len, 2)) { + result = -ECONNABORTED; + goto accept_err; + } + + /* Get the new node's NODEID */ + make_sockaddr(&peeraddr, 0, &len); + if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) { + printk("dlm: connect from non cluster node\n"); + sock_release(newsock); + up_read(&con->sock_sem); + return -1; + } + + log_print("got connection from %d", nodeid); + + /* Check to see if we already have a connection to this node. This + * could happen if the two nodes initiate a connection at roughly + * the same time and the connections cross on the wire. 
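The receive path above treats the single rx_page as a ring buffer via the cbuf helpers introduced earlier in this file, which is why the recvmsg call may need two iovecs: the tail of the page plus the wrapped-around head. A stand-alone, user-space sketch of that arithmetic (a hypothetical test harness, not part of the patch; the size must be a power of two for the mask trick to work):

	#include <assert.h>

	struct cbuf { unsigned int base, len, mask; };

	static void cbuf_init(struct cbuf *cb, int size) { cb->base = cb->len = 0; cb->mask = size - 1; }
	static void cbuf_add(struct cbuf *cb, int n)     { cb->len += n; }
	static int  cbuf_data(struct cbuf *cb)           { return (cb->base + cb->len) & cb->mask; }
	static void cbuf_eat(struct cbuf *cb, int n)     { cb->len -= n; cb->base = (cb->base + n) & cb->mask; }

	int main(void)
	{
		struct cbuf cb;

		cbuf_init(&cb, 4096);
		cbuf_add(&cb, 4000);	/* 4000 bytes received */
		cbuf_eat(&cb, 3900);	/* 3900 of them consumed */
		cbuf_add(&cb, 500);	/* next receive wraps past the page end */
		assert(cbuf_data(&cb) == (3900 + 100 + 500) % 4096);
		return 0;
	}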
+ * TEMPORARY FIX: + * In this case we store the incoming one in "othercon" + */ + newcon = nodeid2con(nodeid, GFP_KERNEL); + if (!newcon) { + result = -ENOMEM; + goto accept_err; + } + down_write(&newcon->sock_sem); + if (newcon->sock) { + struct connection *othercon = newcon->othercon; + + if (!othercon) { + othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL); + if (!othercon) { + printk("dlm: failed to allocate incoming socket\n"); + up_write(&newcon->sock_sem); + result = -ENOMEM; + goto accept_err; + } + othercon->nodeid = nodeid; + othercon->rx_action = receive_from_sock; + init_rwsem(&othercon->sock_sem); + set_bit(CF_IS_OTHERCON, &othercon->flags); + newcon->othercon = othercon; + } + othercon->sock = newsock; + newsock->sk->sk_user_data = othercon; + add_sock(newsock, othercon); + } + else { + newsock->sk->sk_user_data = newcon; + newcon->rx_action = receive_from_sock; + add_sock(newsock, newcon); + + } + + up_write(&newcon->sock_sem); + + /* + * Add it to the active queue in case we got data + * beween processing the accept adding the socket + * to the read_sockets list + */ + lowcomms_data_ready(newsock->sk, 0); + up_read(&con->sock_sem); + + return 0; + +accept_err: + up_read(&con->sock_sem); + sock_release(newsock); + + if (result != -EAGAIN) + printk("dlm: error accepting connection from node: %d\n", result); + return result; +} + +/* Connect a new socket to its peer */ +static void connect_to_sock(struct connection *con) +{ + int result = -EHOSTUNREACH; + struct sockaddr_storage saddr; + int addr_len; + struct socket *sock; + + if (con->nodeid == 0) { + log_print("attempt to connect sock 0 foiled"); + return; + } + + down_write(&con->sock_sem); + if (con->retries++ > MAX_CONNECT_RETRIES) + goto out; + + /* Some odd races can cause double-connects, ignore them */ + if (con->sock) { + result = 0; + goto out; + } + + /* Create a socket to communicate with */ + result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, + IPPROTO_TCP, &sock); + if (result < 0) + goto out_err; + + memset(&saddr, 0, sizeof(saddr)); + if (dlm_nodeid_to_addr(con->nodeid, &saddr)) + goto out_err; + + sock->sk->sk_user_data = con; + con->rx_action = receive_from_sock; + + make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len); + + add_sock(sock, con); + + log_print("connecting to %d", con->nodeid); + result = + sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len, + O_NONBLOCK); + if (result == -EINPROGRESS) + result = 0; + if (result == 0) + goto out; + +out_err: + if (con->sock) { + sock_release(con->sock); + con->sock = NULL; + } + /* + * Some errors are fatal and this list might need adjusting. For other + * errors we try again until the max number of retries is reached. 
+ */ + if (result != -EHOSTUNREACH && result != -ENETUNREACH && + result != -ENETDOWN && result != EINVAL + && result != -EPROTONOSUPPORT) { + lowcomms_connect_sock(con); + result = 0; + } +out: + up_write(&con->sock_sem); + return; +} + +static struct socket *create_listen_sock(struct connection *con, + struct sockaddr_storage *saddr) +{ + struct socket *sock = NULL; + mm_segment_t fs; + int result = 0; + int one = 1; + int addr_len; + + if (dlm_local_addr.ss_family == AF_INET) + addr_len = sizeof(struct sockaddr_in); + else + addr_len = sizeof(struct sockaddr_in6); + + /* Create a socket to communicate with */ + result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock); + if (result < 0) { + printk("dlm: Can't create listening comms socket\n"); + goto create_out; + } + + fs = get_fs(); + set_fs(get_ds()); + result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, + (char *)&one, sizeof(one)); + set_fs(fs); + if (result < 0) { + printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n", + result); + } + sock->sk->sk_user_data = con; + con->rx_action = accept_from_sock; + con->sock = sock; + + /* Bind to our port */ + make_sockaddr(saddr, dlm_config.tcp_port, &addr_len); + result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len); + if (result < 0) { + printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port); + sock_release(sock); + sock = NULL; + con->sock = NULL; + goto create_out; + } + + fs = get_fs(); + set_fs(get_ds()); + + result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + (char *)&one, sizeof(one)); + set_fs(fs); + if (result < 0) { + printk("dlm: Set keepalive failed: %d\n", result); + } + + result = sock->ops->listen(sock, 5); + if (result < 0) { + printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port); + sock_release(sock); + sock = NULL; + goto create_out; + } + +create_out: + return sock; +} + + +/* Listen on all interfaces */ +static int listen_for_all(void) +{ + struct socket *sock = NULL; + struct connection *con = nodeid2con(0, GFP_KERNEL); + int result = -EINVAL; + + /* We don't support multi-homed hosts */ + set_bit(CF_IS_OTHERCON, &con->flags); + + sock = create_listen_sock(con, &dlm_local_addr); + if (sock) { + add_sock(sock, con); + result = 0; + } + else { + result = -EADDRINUSE; + } + + return result; +} + + + +static struct writequeue_entry *new_writequeue_entry(struct connection *con, + gfp_t allocation) +{ + struct writequeue_entry *entry; + + entry = kmalloc(sizeof(struct writequeue_entry), allocation); + if (!entry) + return NULL; + + entry->page = alloc_page(allocation); + if (!entry->page) { + kfree(entry); + return NULL; + } + + entry->offset = 0; + entry->len = 0; + entry->end = 0; + entry->users = 0; + entry->con = con; + + return entry; +} + +void *dlm_lowcomms_get_buffer(int nodeid, int len, + gfp_t allocation, char **ppc) +{ + struct connection *con; + struct writequeue_entry *e; + int offset = 0; + int users = 0; + + con = nodeid2con(nodeid, allocation); + if (!con) + return NULL; + + e = list_entry(con->writequeue.prev, struct writequeue_entry, list); + if ((&e->list == &con->writequeue) || + (PAGE_CACHE_SIZE - e->end < len)) { + e = NULL; + } else { + offset = e->end; + e->end += len; + users = e->users++; + } + spin_unlock(&con->writequeue_lock); + + if (e) { + got_one: + if (users == 0) + kmap(e->page); + *ppc = page_address(e->page) + offset; + return e; + } + + e = new_writequeue_entry(con, allocation); + if (e) { + spin_lock(&con->writequeue_lock); + offset = e->end; + e->end += len; + 
users = e->users++; + list_add_tail(&e->list, &con->writequeue); + spin_unlock(&con->writequeue_lock); + goto got_one; + } + return NULL; +} + +void dlm_lowcomms_commit_buffer(void *mh) +{ + struct writequeue_entry *e = (struct writequeue_entry *)mh; + struct connection *con = e->con; + int users; + + users = --e->users; + if (users) + goto out; + e->len = e->end - e->offset; + kunmap(e->page); + spin_unlock(&con->writequeue_lock); + + if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) { + spin_lock_bh(&write_sockets_lock); + list_add_tail(&con->write_list, &write_sockets); + spin_unlock_bh(&write_sockets_lock); + + wake_up_interruptible(&lowcomms_send_waitq); + } + return; + +out: + spin_unlock(&con->writequeue_lock); + return; +} + +static void free_entry(struct writequeue_entry *e) +{ + __free_page(e->page); + kfree(e); +} + +/* Send a message */ +static void send_to_sock(struct connection *con) +{ + int ret = 0; + ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int); + const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; + struct writequeue_entry *e; + int len, offset; + + down_read(&con->sock_sem); + if (con->sock == NULL) + goto out_connect; + + sendpage = con->sock->ops->sendpage; + + spin_lock(&con->writequeue_lock); + for (;;) { + e = list_entry(con->writequeue.next, struct writequeue_entry, + list); + if ((struct list_head *) e == &con->writequeue) + break; + + len = e->len; + offset = e->offset; + BUG_ON(len == 0 && e->users == 0); + spin_unlock(&con->writequeue_lock); + + ret = 0; + if (len) { + ret = sendpage(con->sock, e->page, offset, len, + msg_flags); + if (ret == -EAGAIN || ret == 0) + goto out; + if (ret <= 0) + goto send_error; + } + else { + /* Don't starve people filling buffers */ + cond_resched(); + } + + spin_lock(&con->writequeue_lock); + e->offset += ret; + e->len -= ret; + + if (e->len == 0 && e->users == 0) { + list_del(&e->list); + kunmap(e->page); + free_entry(e); + continue; + } + } + spin_unlock(&con->writequeue_lock); +out: + up_read(&con->sock_sem); + return; + +send_error: + up_read(&con->sock_sem); + close_connection(con, false); + lowcomms_connect_sock(con); + return; + +out_connect: + up_read(&con->sock_sem); + lowcomms_connect_sock(con); + return; +} + +static void clean_one_writequeue(struct connection *con) +{ + struct list_head *list; + struct list_head *temp; + + spin_lock(&con->writequeue_lock); + list_for_each_safe(list, temp, &con->writequeue) { + struct writequeue_entry *e = + list_entry(list, struct writequeue_entry, list); + list_del(&e->list); + free_entry(e); + } + spin_unlock(&con->writequeue_lock); +} + +/* Called from recovery when it knows that a node has + left the cluster */ +int dlm_lowcomms_close(int nodeid) +{ + struct connection *con; + + if (!connections) + goto out; + + log_print("closing connection to node %d", nodeid); + con = nodeid2con(nodeid, 0); + if (con) { + clean_one_writequeue(con); + close_connection(con, true); + atomic_set(&con->waiting_requests, 0); + } + return 0; + +out: + return -1; +} + +/* API send message call, may queue the request */ +/* N.B. 
This is the old interface - use the new one for new calls */ +int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation) +{ + struct writequeue_entry *e; + char *b; + + e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b); + if (e) { + memcpy(b, buf, len); + dlm_lowcomms_commit_buffer(e); + return 0; + } + return -ENOBUFS; +} + +/* Look for activity on active sockets */ +static void process_sockets(void) +{ + struct list_head *list; + struct list_head *temp; + int count = 0; + + spin_lock_bh(&read_sockets_lock); + list_for_each_safe(list, temp, &read_sockets) { + + struct connection *con = + list_entry(list, struct connection, read_list); + list_del(&con->read_list); + clear_bit(CF_READ_PENDING, &con->flags); + + spin_unlock_bh(&read_sockets_lock); + + /* This can reach zero if we are processing requests + * as they come in. + */ + if (atomic_read(&con->waiting_requests) == 0) { + spin_lock_bh(&read_sockets_lock); + continue; + } + + do { + con->rx_action(con); + + /* Don't starve out everyone else */ + if (++count >= MAX_RX_MSG_COUNT) { + cond_resched(); + count = 0; + } + + } while (!atomic_dec_and_test(&con->waiting_requests) && + !kthread_should_stop()); + + spin_lock_bh(&read_sockets_lock); + } + spin_unlock_bh(&read_sockets_lock); +} + +/* Try to send any messages that are pending + */ +static void process_output_queue(void) +{ + struct list_head *list; + struct list_head *temp; + + spin_lock_bh(&write_sockets_lock); + list_for_each_safe(list, temp, &write_sockets) { + struct connection *con = + list_entry(list, struct connection, write_list); + clear_bit(CF_WRITE_PENDING, &con->flags); + list_del(&con->write_list); + + spin_unlock_bh(&write_sockets_lock); + send_to_sock(con); + spin_lock_bh(&write_sockets_lock); + } + spin_unlock_bh(&write_sockets_lock); +} + +static void process_state_queue(void) +{ + struct list_head *list; + struct list_head *temp; + + spin_lock_bh(&state_sockets_lock); + list_for_each_safe(list, temp, &state_sockets) { + struct connection *con = + list_entry(list, struct connection, state_list); + list_del(&con->state_list); + clear_bit(CF_CONNECT_PENDING, &con->flags); + spin_unlock_bh(&state_sockets_lock); + + connect_to_sock(con); + spin_lock_bh(&state_sockets_lock); + } + spin_unlock_bh(&state_sockets_lock); +} + + +/* Discard all entries on the write queues */ +static void clean_writequeues(void) +{ + int nodeid; + + for (nodeid = 1; nodeid < conn_array_size; nodeid++) { + struct connection *con = nodeid2con(nodeid, 0); + + if (con) + clean_one_writequeue(con); + } +} + +static int read_list_empty(void) +{ + int status; + + spin_lock_bh(&read_sockets_lock); + status = list_empty(&read_sockets); + spin_unlock_bh(&read_sockets_lock); + + return status; +} + +/* DLM Transport comms receive daemon */ +static int dlm_recvd(void *data) +{ + init_waitqueue_entry(&lowcomms_recv_waitq_head, current); + add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head); + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (read_list_empty()) + cond_resched(); + set_current_state(TASK_RUNNING); + + process_sockets(); + } + + return 0; +} + +static int write_and_state_lists_empty(void) +{ + int status; + + spin_lock_bh(&write_sockets_lock); + status = list_empty(&write_sockets); + spin_unlock_bh(&write_sockets_lock); + + spin_lock_bh(&state_sockets_lock); + if (list_empty(&state_sockets) == 0) + status = 0; + spin_unlock_bh(&state_sockets_lock); + + return status; +} + +/* DLM Transport send daemon */ +static int 
dlm_sendd(void *data) +{ + init_waitqueue_entry(&lowcomms_send_waitq_head, current); + add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head); + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (write_and_state_lists_empty()) + cond_resched(); + set_current_state(TASK_RUNNING); + + process_state_queue(); + process_output_queue(); + } + + return 0; +} + +static void daemons_stop(void) +{ + kthread_stop(recv_task); + kthread_stop(send_task); +} + +static int daemons_start(void) +{ + struct task_struct *p; + int error; + + p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); + error = IS_ERR(p); + if (error) { + log_print("can't start dlm_recvd %d", error); + return error; + } + recv_task = p; + + p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); + error = IS_ERR(p); + if (error) { + log_print("can't start dlm_sendd %d", error); + kthread_stop(recv_task); + return error; + } + send_task = p; + + return 0; +} + +/* + * Return the largest buffer size we can cope with. + */ +int lowcomms_max_buffer_size(void) +{ + return PAGE_CACHE_SIZE; +} + +void dlm_lowcomms_stop(void) +{ + int i; + + /* Set all the flags to prevent any + socket activity. + */ + for (i = 0; i < conn_array_size; i++) { + if (connections[i]) + connections[i]->flags |= 0xFF; + } + + daemons_stop(); + clean_writequeues(); + + for (i = 0; i < conn_array_size; i++) { + if (connections[i]) { + close_connection(connections[i], true); + if (connections[i]->othercon) + kmem_cache_free(con_cache, connections[i]->othercon); + kmem_cache_free(con_cache, connections[i]); + } + } + + kfree(connections); + connections = NULL; + + kmem_cache_destroy(con_cache); +} + +/* This is quite likely to sleep... */ +int dlm_lowcomms_start(void) +{ + int error = 0; + + error = -ENOMEM; + connections = kzalloc(sizeof(struct connection *) * + NODE_INCREMENT, GFP_KERNEL); + if (!connections) + goto out; + + conn_array_size = NODE_INCREMENT; + + if (dlm_our_addr(&dlm_local_addr, 0)) { + log_print("no local IP address has been set"); + goto fail_free_conn; + } + if (!dlm_our_addr(&dlm_local_addr, 1)) { + log_print("This dlm comms module does not support multi-homed clustering"); + goto fail_free_conn; + } + + con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection), + __alignof__(struct connection), 0, + NULL, NULL); + if (!con_cache) + goto fail_free_conn; + + + /* Start listening */ + error = listen_for_all(); + if (error) + goto fail_unlisten; + + error = daemons_start(); + if (error) + goto fail_unlisten; + + return 0; + +fail_unlisten: + close_connection(connections[0], false); + kmem_cache_free(con_cache, connections[0]); + kmem_cache_destroy(con_cache); + +fail_free_conn: + kfree(connections); + +out: + return error; +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h index 2d045e0daae1..a9a9618c0d3f 100644 --- a/fs/dlm/lowcomms.h +++ b/fs/dlm/lowcomms.h @@ -14,8 +14,6 @@ #ifndef __LOWCOMMS_DOT_H__ #define __LOWCOMMS_DOT_H__ -int dlm_lowcomms_init(void); -void dlm_lowcomms_exit(void); int dlm_lowcomms_start(void); void dlm_lowcomms_stop(void); int dlm_lowcomms_close(int nodeid); diff --git a/fs/dlm/main.c b/fs/dlm/main.c index a8da8dc36b2e..162fbae58fe5 100644 --- a/fs/dlm/main.c +++ b/fs/dlm/main.c @@ -16,7 +16,6 @@ #include "lock.h" #include "user.h" #include "memory.h" -#include "lowcomms.h" #include "config.h" #ifdef CONFIG_DLM_DEBUG @@ -47,20 +46,14 @@ static int __init init_dlm(void) if (error) goto out_config; - error = dlm_lowcomms_init(); - if (error) - goto out_debug; - error = dlm_user_init(); if (error) - goto out_lowcomms; + goto out_debug; printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); return 0; - out_lowcomms: - dlm_lowcomms_exit(); out_debug: dlm_unregister_debugfs(); out_config: @@ -76,7 +69,6 @@ static int __init init_dlm(void) static void __exit exit_dlm(void) { dlm_user_exit(); - dlm_lowcomms_exit(); dlm_config_exit(); dlm_memory_exit(); dlm_lockspace_exit(); diff --git a/fs/dlm/member.c b/fs/dlm/member.c index a3f7de7f3a8f..85e2897bd740 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) struct dlm_member *memb, *safe; int i, error, found, pos = 0, neg = 0, low = -1; + /* previously removed members that we've not finished removing need to + count as a negative change so the "neg" recovery steps will happen */ + + list_for_each_entry(memb, &ls->ls_nodes_gone, list) { + log_debug(ls, "prev removed member %d", memb->nodeid); + neg++; + } + /* move departed members from ls_nodes to ls_nodes_gone */ list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index 989b608fd836..5352b03ff5aa 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -15,7 +15,7 @@ #include "config.h" #include "memory.h" -static kmem_cache_t *lkb_cache; +static struct kmem_cache *lkb_cache; int dlm_memory_init(void) diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index 518239a8b1e9..4cc31be9cd9d 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c @@ -90,13 +90,28 @@ static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid) return 0; } +static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq) +{ + spin_lock(&ls->ls_rcom_spin); + *new_seq = ++ls->ls_rcom_seq; + set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); + spin_unlock(&ls->ls_rcom_spin); +} + +static void disallow_sync_reply(struct dlm_ls *ls) +{ + spin_lock(&ls->ls_rcom_spin); + clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); + clear_bit(LSFL_RCOM_READY, &ls->ls_flags); + spin_unlock(&ls->ls_rcom_spin); +} + int dlm_rcom_status(struct dlm_ls *ls, int nodeid) { struct dlm_rcom *rc; struct dlm_mhandle *mh; int error = 0; - memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); ls->ls_recover_nodeid = nodeid; if (nodeid == dlm_our_nodeid()) { @@ -108,12 +123,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid) error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh); if (error) goto out; - rc->rc_id = ++ls->ls_rcom_seq; + + allow_sync_reply(ls, &rc->rc_id); + memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); send_rcom(ls, mh, rc); error = 
dlm_wait_function(ls, &rcom_response); - clear_bit(LSFL_RCOM_READY, &ls->ls_flags); + disallow_sync_reply(ls); if (error) goto out; @@ -150,14 +167,21 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in) static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) { - if (rc_in->rc_id != ls->ls_rcom_seq) { - log_debug(ls, "reject old reply %d got %llx wanted %llx", - rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq); - return; + spin_lock(&ls->ls_rcom_spin); + if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || + rc_in->rc_id != ls->ls_rcom_seq) { + log_debug(ls, "reject reply %d from %d seq %llx expect %llx", + rc_in->rc_type, rc_in->rc_header.h_nodeid, + (unsigned long long)rc_in->rc_id, + (unsigned long long)ls->ls_rcom_seq); + goto out; } memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length); set_bit(LSFL_RCOM_READY, &ls->ls_flags); + clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); wake_up(&ls->ls_wait_general); + out: + spin_unlock(&ls->ls_rcom_spin); } static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) @@ -171,7 +195,6 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len) struct dlm_mhandle *mh; int error = 0, len = sizeof(struct dlm_rcom); - memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); ls->ls_recover_nodeid = nodeid; if (nodeid == dlm_our_nodeid()) { @@ -185,12 +208,14 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len) if (error) goto out; memcpy(rc->rc_buf, last_name, last_len); - rc->rc_id = ++ls->ls_rcom_seq; + + allow_sync_reply(ls, &rc->rc_id); + memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); send_rcom(ls, mh, rc); error = dlm_wait_function(ls, &rcom_response); - clear_bit(LSFL_RCOM_READY, &ls->ls_flags); + disallow_sync_reply(ls); out: return error; } @@ -370,9 +395,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) { struct dlm_rcom *rc; + struct rcom_config *rf; struct dlm_mhandle *mh; char *mb; - int mb_len = sizeof(struct dlm_rcom); + int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb); if (!mh) @@ -391,6 +417,9 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) rc->rc_id = rc_in->rc_id; rc->rc_result = -ESRCH; + rf = (struct rcom_config *) rc->rc_buf; + rf->rf_lvblen = -1; + dlm_rcom_out(rc); dlm_lowcomms_commit_buffer(mh); @@ -412,9 +441,10 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid) ls = dlm_find_lockspace_global(hd->h_lockspace); if (!ls) { - log_print("lockspace %x from %d not found", - hd->h_lockspace, nodeid); - send_ls_not_ready(nodeid, rc); + log_print("lockspace %x from %d type %x not found", + hd->h_lockspace, nodeid, rc->rc_type); + if (rc->rc_type == DLM_RCOM_STATUS) + send_ls_not_ready(nodeid, rc); return; } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index a5e6d184872e..cf9f6831bab5 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -252,6 +252,7 @@ static void recover_list_clear(struct dlm_ls *ls) spin_lock(&ls->ls_recover_list_lock); list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_del_init(&r->res_recover_list); + r->res_recover_locks_count = 0; dlm_put_rsb(r); ls->ls_recover_list_count--; } diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 362e3eff4dc9..650536aa5139 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -45,7 +45,7 @@ static int 
ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) unsigned long start; int error, neg = 0; - log_debug(ls, "recover %llx", rv->seq); + log_debug(ls, "recover %llx", (unsigned long long)rv->seq); mutex_lock(&ls->ls_recoverd_active); @@ -94,14 +94,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) } /* - * Purge directory-related requests that are saved in requestqueue. - * All dir requests from before recovery are invalid now due to the dir - * rebuild and will be resent by the requesting nodes. - */ - - dlm_purge_requestqueue(ls); - - /* * Wait for all nodes to complete directory rebuild. */ @@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) */ dlm_recover_rsbs(ls); + } else { + /* + * Other lockspace members may be going through the "neg" steps + * while also adding us to the lockspace, in which case they'll + * be doing the recover_locks (RS_LOCKS) barrier. + */ + dlm_set_recover_status(ls, DLM_RS_LOCKS); + + error = dlm_recover_locks_wait(ls); + if (error) { + log_error(ls, "recover_locks_wait failed %d", error); + goto fail; + } } dlm_release_root_list(ls); + /* + * Purge directory-related requests that are saved in requestqueue. + * All dir requests from before recovery are invalid now due to the dir + * rebuild and will be resent by the requesting nodes. + */ + + dlm_purge_requestqueue(ls); + dlm_set_recover_status(ls, DLM_RS_DONE); error = dlm_recover_done_wait(ls); if (error) { @@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_astd_wake(); - log_debug(ls, "recover %llx done: %u ms", rv->seq, + log_debug(ls, "recover %llx done: %u ms", + (unsigned long long)rv->seq, jiffies_to_msecs(jiffies - start)); mutex_unlock(&ls->ls_recoverd_active); @@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) fail: dlm_release_root_list(ls); - log_debug(ls, "recover %llx error %d", rv->seq, error); + log_debug(ls, "recover %llx error %d", + (unsigned long long)rv->seq, error); mutex_unlock(&ls->ls_recoverd_active); return error; } +/* The dlm_ls_start() that created the rv we take here may already have been + stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP + flag set. */ + static void do_ls_recovery(struct dlm_ls *ls) { struct dlm_recover *rv = NULL; @@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls) spin_lock(&ls->ls_recover_lock); rv = ls->ls_recover_args; ls->ls_recover_args = NULL; - clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); + if (rv && ls->ls_recover_seq == rv->seq) + clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); spin_unlock(&ls->ls_recover_lock); if (rv) { diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index 7b2b089634a2..65008d79c96d 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c @@ -30,26 +30,36 @@ struct rq_entry { * lockspace is enabled on some while still suspended on others. 
*/ -void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) +int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) { struct rq_entry *e; int length = hd->h_length; - - if (dlm_is_removed(ls, nodeid)) - return; + int rv = 0; e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); if (!e) { log_print("dlm_add_requestqueue: out of memory\n"); - return; + return 0; } e->nodeid = nodeid; memcpy(e->request, hd, length); + /* We need to check dlm_locking_stopped() after taking the mutex to + avoid a race where dlm_recoverd enables locking and runs + process_requestqueue between our earlier dlm_locking_stopped check + and this addition to the requestqueue. */ + mutex_lock(&ls->ls_requestqueue_mutex); - list_add_tail(&e->list, &ls->ls_requestqueue); + if (dlm_locking_stopped(ls)) + list_add_tail(&e->list, &ls->ls_requestqueue); + else { + log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid); + kfree(e); + rv = -EAGAIN; + } mutex_unlock(&ls->ls_requestqueue_mutex); + return rv; } int dlm_process_requestqueue(struct dlm_ls *ls) @@ -120,6 +130,10 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { uint32_t type = ms->m_type; + /* the ls is being cleaned up and freed by release_lockspace */ + if (!ls->ls_count) + return 1; + if (dlm_is_removed(ls, nodeid)) return 1; diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h index 349f0d292d95..6a53ea03335d 100644 --- a/fs/dlm/requestqueue.h +++ b/fs/dlm/requestqueue.h @@ -13,7 +13,7 @@ #ifndef __REQUESTQUEUE_DOT_H__ #define __REQUESTQUEUE_DOT_H__ -void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); +int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); int dlm_process_requestqueue(struct dlm_ls *ls); void dlm_wait_requestqueue(struct dlm_ls *ls); void dlm_purge_requestqueue(struct dlm_ls *ls); diff --git a/fs/dnotify.c b/fs/dnotify.c index 2b0442db67e0..1f26a2b9eee1 100644 --- a/fs/dnotify.c +++ b/fs/dnotify.c @@ -23,7 +23,7 @@ int dir_notify_enable __read_mostly = 1; -static kmem_cache_t *dn_cache __read_mostly; +static struct kmem_cache *dn_cache __read_mostly; static void redo_inode_mask(struct inode *inode) { @@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) inode = filp->f_dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return -ENOTDIR; - dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL); + dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); if (dn == NULL) return -ENOMEM; spin_lock(&inode->i_lock); diff --git a/fs/dquot.c b/fs/dquot.c index 9af789567e51..f9cd5e23ebdf 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -131,7 +131,7 @@ static struct quota_format_type *quota_formats; /* List of registered formats */ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; /* SLAB cache for dquot structures */ -static kmem_cache_t *dquot_cachep; +static struct kmem_cache *dquot_cachep; int register_quota_format(struct quota_format_type *fmt) { @@ -600,7 +600,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; - dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS); + dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS); if(!dquot) return NODQUOT; diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 136175a69332..7196f50fe152 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -628,7 +628,7 @@ int ecryptfs_decrypt_page(struct file *file, struct page *page) num_extents_per_page = PAGE_CACHE_SIZE / 
crypt_stat->extent_size; base_extent = (page->index * num_extents_per_page); lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache, - SLAB_KERNEL); + GFP_KERNEL); if (!lower_page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Error getting page for encrypted " @@ -820,7 +820,8 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat) crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); kfree(full_alg_name); - if (!crypt_stat->tfm) { + if (IS_ERR(crypt_stat->tfm)) { + rc = PTR_ERR(crypt_stat->tfm); ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): " "Error initializing cipher [%s]\n", crypt_stat->cipher); @@ -1333,7 +1334,7 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry, goto out; } /* Released in this function */ - page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER); + page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER); if (!page_virt) { ecryptfs_printk(KERN_ERR, "Out of memory\n"); rc = -ENOMEM; @@ -1492,7 +1493,7 @@ int ecryptfs_read_headers(struct dentry *ecryptfs_dentry, &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; /* Read the first page from the underlying file */ - page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER); + page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER); if (!page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n"); diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 0b9992ab990f..52d1e36dc746 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -57,6 +57,12 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); nd->dentry = dentry_save; nd->mnt = vfsmount_save; + if (dentry->d_inode) { + struct inode *lower_inode = + ecryptfs_inode_to_lower(dentry->d_inode); + + ecryptfs_copy_attr_all(dentry->d_inode, lower_inode); + } out: return rc; } diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index a92ef05eff8f..42099e779a56 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -250,7 +250,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) int lower_flags; /* Released in ecryptfs_release or end of function if failure */ - file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL); + file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL); ecryptfs_set_file_private(file, file_info); if (!file_info) { ecryptfs_printk(KERN_ERR, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index ff4865d24f0f..8a1945a84c36 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -369,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, BUG_ON(!atomic_read(&lower_dentry->d_count)); ecryptfs_set_dentry_private(dentry, kmem_cache_alloc(ecryptfs_dentry_info_cache, - SLAB_KERNEL)); + GFP_KERNEL)); if (!ecryptfs_dentry_to_private(dentry)) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting " @@ -404,7 +404,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, /* Released in this function */ page_virt = (char *)kmem_cache_alloc(ecryptfs_header_cache_2, - SLAB_USER); + GFP_USER); if (!page_virt) { rc = -ENOMEM; ecryptfs_printk(KERN_ERR, @@ -470,6 +470,7 @@ out_lock: unlock_dir(lower_dir_dentry); dput(lower_new_dentry); dput(lower_old_dentry); + d_drop(lower_old_dentry); d_drop(new_dentry); d_drop(old_dentry); return rc; @@ -484,7 +485,7 @@ static int ecryptfs_unlink(struct inode 
*dir, struct dentry *dentry) lock_parent(lower_dentry); rc = vfs_unlink(lower_dir_inode, lower_dentry); if (rc) { - ecryptfs_printk(KERN_ERR, "Error in vfs_unlink\n"); + printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); goto out_unlock; } ecryptfs_copy_attr_times(dir, lower_dir_inode); @@ -630,6 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, ecryptfs_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); out_lock: unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); + dput(lower_new_dentry->d_parent); + dput(lower_old_dentry->d_parent); dput(lower_new_dentry); dput(lower_old_dentry); return rc; @@ -792,7 +795,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) /* Released at out_free: label */ ecryptfs_set_file_private(&fake_ecryptfs_file, kmem_cache_alloc(ecryptfs_file_info_cache, - SLAB_KERNEL)); + GFP_KERNEL)); if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) { rc = -ENOMEM; goto out; diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c3746f56d162..745c0f1bfbbd 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -207,7 +207,7 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or * at end of function upon failure */ auth_tok_list_item = - kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL); + kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL); if (!auth_tok_list_item) { ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n"); rc = -ENOMEM; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index a78d87d14baf..3ede12b25933 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -378,7 +378,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) /* Released in ecryptfs_put_super() */ ecryptfs_set_superblock_private(sb, kmem_cache_alloc(ecryptfs_sb_info_cache, - SLAB_KERNEL)); + GFP_KERNEL)); if (!ecryptfs_superblock_to_private(sb)) { ecryptfs_printk(KERN_WARNING, "Out of memory\n"); rc = -ENOMEM; @@ -402,7 +402,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent) /* through deactivate_super(sb) from get_sb_nodev() */ ecryptfs_set_dentry_private(sb->s_root, kmem_cache_alloc(ecryptfs_dentry_info_cache, - SLAB_KERNEL)); + GFP_KERNEL)); if (!ecryptfs_dentry_to_private(sb->s_root)) { ecryptfs_printk(KERN_ERR, "dentry_info_cache alloc failed\n"); @@ -546,7 +546,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags) } static struct ecryptfs_cache_info { - kmem_cache_t **cache; + struct kmem_cache **cache; const char *name; size_t size; void (*ctor)(void*, struct kmem_cache *, unsigned long); @@ -691,7 +691,7 @@ static ssize_t version_show(struct ecryptfs_obj *obj, char *buff) static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version); -struct ecryptfs_version_str_map_elem { +static struct ecryptfs_version_str_map_elem { u32 flag; char *str; } ecryptfs_version_str_map[] = { diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 825757ae4867..eaa5daaf106e 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c @@ -50,7 +50,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb) struct inode *inode = NULL; ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache, - SLAB_KERNEL); + GFP_KERNEL); if (unlikely(!ecryptfs_inode)) goto out; ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat); diff --git a/fs/efs/super.c b/fs/efs/super.c index b3f50651eb6b..dfebf21289f4 100644 
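Many hunks in this stretch (dnotify, dquot, ecryptfs, and the efs diff that follows) are a mechanical allocator cleanup: the SLAB_KERNEL/SLAB_NOFS/SLAB_USER flag aliases give way to plain GFP_KERNEL/GFP_NOFS/GFP_USER, and the kmem_cache_t typedef is spelled out as struct kmem_cache. A before/after sketch with a hypothetical cache name:

#include <linux/slab.h>

static struct kmem_cache *example_cachep;   /* was: static kmem_cache_t *example_cachep; */

static void *example_alloc(void)
{
        /* was: return kmem_cache_alloc(example_cachep, SLAB_KERNEL); */
        return kmem_cache_alloc(example_cachep, GFP_KERNEL);
}
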
--- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -52,12 +52,12 @@ static struct pt_types sgi_pt_types[] = { }; -static kmem_cache_t * efs_inode_cachep; +static struct kmem_cache * efs_inode_cachep; static struct inode *efs_alloc_inode(struct super_block *sb) { struct efs_inode_info *ei; - ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL); + ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -68,7 +68,7 @@ static void efs_destroy_inode(struct inode *inode) kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct efs_inode_info *ei = (struct efs_inode_info *) foo; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ae228ec54e94..88a6f8d0b88e 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -283,10 +283,10 @@ static struct mutex epmutex; static struct poll_safewake psw; /* Slab cache used to allocate "struct epitem" */ -static kmem_cache_t *epi_cache __read_mostly; +static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ -static kmem_cache_t *pwq_cache __read_mostly; +static struct kmem_cache *pwq_cache __read_mostly; /* Virtual fs used to allocate inodes for eventpoll files */ static struct vfsmount *eventpoll_mnt __read_mostly; @@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, struct epitem *epi = ep_item_from_epqueue(pt); struct eppoll_entry *pwq; - if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) { + if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); pwq->whead = whead; pwq->base = epi; @@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, struct ep_pqueue epq; error = -ENOMEM; - if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL))) + if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) goto eexit_1; /* Item initialization follow here ... */ diff --git a/fs/exec.c b/fs/exec.c index d993ea1a81ae..add0e03c3ea9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->loader += stack_base; bprm->exec += stack_base; - mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!mpnt) return -ENOMEM; @@ -1515,7 +1515,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) ispipe = 1; } else file = filp_open(corename, - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600); + O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, + 0600); if (IS_ERR(file)) goto fail_unlock; inode = file->f_dentry->d_inode; diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c index 1dfba77eab10..e3cf8c81507f 100644 --- a/fs/ext2/ioctl.c +++ b/fs/ext2/ioctl.c @@ -44,6 +44,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, if (!S_ISDIR(inode->i_mode)) flags &= ~EXT2_DIRSYNC_FL; + mutex_lock(&inode->i_mutex); oldflags = ei->i_flags; /* @@ -53,13 +54,16 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, * This test looks nicer. 
Thanks to Pauline Middelink */ if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { - if (!capable(CAP_LINUX_IMMUTABLE)) + if (!capable(CAP_LINUX_IMMUTABLE)) { + mutex_unlock(&inode->i_mutex); return -EPERM; + } } flags = flags & EXT2_FL_USER_MODIFIABLE; flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; ei->i_flags = flags; + mutex_unlock(&inode->i_mutex); ext2_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME_SEC; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index d8b9abd95d07..255cef5f7420 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -135,12 +135,12 @@ static void ext2_put_super (struct super_block * sb) return; } -static kmem_cache_t * ext2_inode_cachep; +static struct kmem_cache * ext2_inode_cachep; static struct inode *ext2_alloc_inode(struct super_block *sb) { struct ext2_inode_info *ei; - ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL); + ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); if (!ei) return NULL; #ifdef CONFIG_EXT2_FS_POSIX_ACL @@ -156,7 +156,7 @@ static void ext2_destroy_inode(struct inode *inode) kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; @@ -1090,8 +1090,10 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext2_sb_info *sbi = EXT2_SB(sb); + struct ext2_super_block *es = sbi->s_es; unsigned long overhead; int i; + u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -1104,7 +1106,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) * All of the blocks before first_data_block are * overhead */ - overhead = le32_to_cpu(sbi->s_es->s_first_data_block); + overhead = le32_to_cpu(es->s_first_data_block); /* * Add the overhead attributed to the superblock and @@ -1125,14 +1127,18 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_type = EXT2_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; - buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead; + buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead; buf->f_bfree = ext2_count_free_blocks(sb); - buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count); - if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count)) + buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); + if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) buf->f_bavail = 0; - buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count); - buf->f_ffree = ext2_count_free_inodes (sb); + buf->f_files = le32_to_cpu(es->s_inodes_count); + buf->f_ffree = ext2_count_free_inodes(sb); buf->f_namelen = EXT2_NAME_LEN; + fsid = le64_to_cpup((void *)es->s_uuid) ^ + le64_to_cpup((void *)es->s_uuid + sizeof(u64)); + buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; + buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index af52a7f8b291..247efd0b51d6 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -342,12 +342,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb) if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR)) return; - lock_super(sb); - EXT2_SB(sb)->s_es->s_feature_compat |= - cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR); + EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; 
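The ext2 statfs hunk above (and its ext3/ext4 counterparts later in this commit) derives f_fsid by folding the 16-byte on-disk UUID into 64 bits and splitting the result across the two 32-bit words. A sketch of the same arithmetic as a hypothetical helper, not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fold a 16-byte uuid into the two 32-bit f_fsid words, mirroring the
 * statfs hunks: XOR the two little-endian 64-bit halves, then split. */
static void uuid_to_fsid(const u8 *uuid, u32 *val0, u32 *val1)
{
        u64 fsid = le64_to_cpup((const void *)uuid) ^
                   le64_to_cpup((const void *)(uuid + sizeof(u64)));

        *val0 = fsid & 0xFFFFFFFFUL;
        *val1 = (fsid >> 32) & 0xFFFFFFFFUL;
}
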
mark_buffer_dirty(EXT2_SB(sb)->s_sbh); - unlock_super(sb); } /* diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile index 704cd44a40c2..e77766a8b3f0 100644 --- a/fs/ext3/Makefile +++ b/fs/ext3/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ - ioctl.o namei.o super.o symlink.o hash.o resize.o + ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index b41a7d7e20f0..22161740ba29 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -144,7 +144,7 @@ restart: printk("Block Allocation Reservation Windows Map (%s):\n", fn); while (n) { - rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); + rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); if (verbose) printk("reservation window 0x%p " "start: %lu, end: %lu\n", @@ -730,7 +730,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh, here = 0; p = ((char *)bh->b_data) + (here >> 3); - r = memscan(p, 0, (maxblocks - here + 7) >> 3); + r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3)); next = (r - ((char *)bh->b_data)) << 3; if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh)) @@ -949,7 +949,7 @@ static int find_next_reservable_window( prev = rsv; next = rb_next(&rsv->rsv_node); - rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); + rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node); /* * Reached the last reservation, we can just append to the @@ -1148,7 +1148,7 @@ retry: * check if the first free block is within the * free space we just reserved */ - if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) + if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) return 0; /* success */ /* * if the first free bit we found is out of the reservable space @@ -1193,7 +1193,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, if (!next) my_rsv->rsv_end += size; else { - next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); + next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node); if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) my_rsv->rsv_end += size; @@ -1271,7 +1271,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, } /* * grp_goal is a group relative block number (if there is a goal) - * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb) + * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb) * first block is a filesystem wide block number * first block is the block number of the first block in this group */ @@ -1307,10 +1307,14 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) grp_goal = -1; - } else if (grp_goal > 0 && - (my_rsv->rsv_end-grp_goal+1) < *count) - try_to_extend_reservation(my_rsv, sb, - *count-my_rsv->rsv_end + grp_goal - 1); + } else if (grp_goal >= 0) { + int curr = my_rsv->rsv_end - + (grp_goal + group_first_block) + 1; + + if (curr < *count) + try_to_extend_reservation(my_rsv, sb, + *count - curr); + } if ((my_rsv->rsv_start > group_last_block) || (my_rsv->rsv_end < group_first_block)) { @@ -1511,10 +1515,8 @@ retry_alloc: if (group_no >= ngroups) group_no = 0; gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); - if (!gdp) { - *errp = -EIO; - goto out; - } + if (!gdp) + goto io_error; free_blocks = 
le16_to_cpu(gdp->bg_free_blocks_count); /* * skip this group if the number of @@ -1548,6 +1550,7 @@ retry_alloc: */ if (my_rsv) { my_rsv = NULL; + windowsz = 0; group_no = goal_group; goto retry_alloc; } diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index d0b54f30b914..5a9313ecd4ef 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -154,6 +154,9 @@ static int ext3_readdir(struct file * filp, ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", inode->i_ino, (unsigned long)filp->f_pos); + /* corrupt size? Maybe no more blocks to read */ + if (filp->f_pos > inode->i_blocks << 9) + break; filp->f_pos += sb->s_blocksize - offset; continue; } diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c new file mode 100644 index 000000000000..e1f91fd26a93 --- /dev/null +++ b/fs/ext3/ext3_jbd.c @@ -0,0 +1,59 @@ +/* + * Interface between ext3 and JBD + */ + +#include <linux/ext3_jbd.h> + +int __ext3_journal_get_undo_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = journal_get_undo_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext3_journal_get_write_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = journal_get_write_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext3_journal_forget(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = journal_forget(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext3_journal_revoke(const char *where, handle_t *handle, + unsigned long blocknr, struct buffer_head *bh) +{ + int err = journal_revoke(handle, blocknr, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext3_journal_get_create_access(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = journal_get_create_access(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext3_journal_dirty_metadata(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = journal_dirty_metadata(handle, bh); + if (err) + ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 03ba5bcab186..beaf25f5112f 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1148,37 +1148,102 @@ static int do_journal_get_write_access(handle_t *handle, return ext3_journal_get_write_access(handle, bh); } +/* + * The idea of this helper function is following: + * if prepare_write has allocated some blocks, but not all of them, the + * transaction must include the content of the newly allocated blocks. + * This content is expected to be set to zeroes by block_prepare_write(). 
+ * 2006/10/14 SAW + */ +static int ext3_prepare_failure(struct file *file, struct page *page, + unsigned from, unsigned to) +{ + struct address_space *mapping; + struct buffer_head *bh, *head, *next; + unsigned block_start, block_end; + unsigned blocksize; + int ret; + handle_t *handle = ext3_journal_current_handle(); + + mapping = page->mapping; + if (ext3_should_writeback_data(mapping->host)) { + /* optimization: no constraints about data */ +skip: + return ext3_journal_stop(handle); + } + + head = page_buffers(page); + blocksize = head->b_size; + for ( bh = head, block_start = 0; + bh != head || !block_start; + block_start = block_end, bh = next) + { + next = bh->b_this_page; + block_end = block_start + blocksize; + if (block_end <= from) + continue; + if (block_start >= to) { + block_start = to; + break; + } + if (!buffer_mapped(bh)) + /* prepare_write failed on this bh */ + break; + if (ext3_should_journal_data(mapping->host)) { + ret = do_journal_get_write_access(handle, bh); + if (ret) { + ext3_journal_stop(handle); + return ret; + } + } + /* + * block_start here becomes the first block where the current iteration + * of prepare_write failed. + */ + } + if (block_start <= from) + goto skip; + + /* commit allocated and zeroed buffers */ + return mapping->a_ops->commit_write(file, page, from, block_start); +} + static int ext3_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - int ret, needed_blocks = ext3_writepage_trans_blocks(inode); + int ret, ret2; + int needed_blocks = ext3_writepage_trans_blocks(inode); handle_t *handle; int retries = 0; retry: handle = ext3_journal_start(inode, needed_blocks); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } + if (IS_ERR(handle)) + return PTR_ERR(handle); if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) ret = nobh_prepare_write(page, from, to, ext3_get_block); else ret = block_prepare_write(page, from, to, ext3_get_block); if (ret) - goto prepare_write_failed; + goto failure; if (ext3_should_journal_data(inode)) { ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); + if (ret) + /* fatal error, just put the handle and return */ + journal_stop(handle); } -prepare_write_failed: - if (ret) - ext3_journal_stop(handle); + return ret; + +failure: + ret2 = ext3_prepare_failure(file, page, from, to); + if (ret2 < 0) + return ret2; if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; -out: + /* retry number exceeded, or other error like -EDQUOT */ return ret; } diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 906731a20f1a..60d2f9dbdb00 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0)); for (; de < top; de = ext3_next_entry(de)) { + if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, + (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb)) + +((char *)de - bh->b_data))) { + /* On error, skip the f_pos to the next block. 
*/ + dir_file->f_pos = (dir_file->f_pos | + (dir->i_sb->s_blocksize - 1)) + 1; + brelse (bh); + return count; + } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && diff --git a/fs/ext3/super.c b/fs/ext3/super.c index afc2d4f42d77..580b8a6ca979 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -436,7 +436,7 @@ static void ext3_put_super (struct super_block * sb) return; } -static kmem_cache_t *ext3_inode_cachep; +static struct kmem_cache *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS @@ -445,7 +445,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; - ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS); + ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); if (!ei) return NULL; #ifdef CONFIG_EXT3_FS_POSIX_ACL @@ -462,7 +462,7 @@ static void ext3_destroy_inode(struct inode *inode) kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; @@ -1264,6 +1264,12 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } + if (bdev_read_only(sb->s_bdev)) { + printk(KERN_ERR "EXT3-fs: write access " + "unavailable, skipping orphan cleanup.\n"); + return; + } + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " @@ -2387,6 +2393,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) struct ext3_super_block *es = sbi->s_es; ext3_fsblk_t overhead; int i; + u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -2433,6 +2440,10 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; + fsid = le64_to_cpup((void *)es->s_uuid) ^ + le64_to_cpup((void *)es->s_uuid + sizeof(u64)); + buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; + buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index f86f2482f01d..99857a400f4b 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -459,14 +459,11 @@ static void ext3_xattr_update_super_block(handle_t *handle, if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR)) return; - lock_super(sb); if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) { - EXT3_SB(sb)->s_es->s_feature_compat |= - cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR); + EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); } - unlock_super(sb); } /* diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index a6acb96ebeb9..ae6e7e502ac9 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -5,7 +5,8 @@ obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o ext4dev-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ - ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o + ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ + ext4_jbd2.o ext4dev-$(CONFIG_EXT4DEV_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 5d45582f9517..c4dd1103ccf1 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -165,7 +165,7 @@ restart: printk("Block Allocation Reservation Windows 
Map (%s):\n", fn); while (n) { - rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node); + rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node); if (verbose) printk("reservation window 0x%p " "start: %llu, end: %llu\n", @@ -747,7 +747,7 @@ find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh, here = 0; p = ((char *)bh->b_data) + (here >> 3); - r = memscan(p, 0, (maxblocks - here + 7) >> 3); + r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3)); next = (r - ((char *)bh->b_data)) << 3; if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh)) @@ -966,7 +966,7 @@ static int find_next_reservable_window( prev = rsv; next = rb_next(&rsv->rsv_node); - rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node); + rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node); /* * Reached the last reservation, we can just append to the @@ -1165,7 +1165,7 @@ retry: * check if the first free block is within the * free space we just reserved */ - if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) + if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) return 0; /* success */ /* * if the first free bit we found is out of the reservable space @@ -1210,7 +1210,7 @@ static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv, if (!next) my_rsv->rsv_end += size; else { - next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node); + next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node); if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) my_rsv->rsv_end += size; @@ -1288,7 +1288,7 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, } /* * grp_goal is a group relative block number (if there is a goal) - * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb) + * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb) * first block is a filesystem wide block number * first block is the block number of the first block in this group */ @@ -1324,10 +1324,14 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) grp_goal = -1; - } else if (grp_goal > 0 && - (my_rsv->rsv_end-grp_goal+1) < *count) - try_to_extend_reservation(my_rsv, sb, - *count-my_rsv->rsv_end + grp_goal - 1); + } else if (grp_goal >= 0) { + int curr = my_rsv->rsv_end - + (grp_goal + group_first_block) + 1; + + if (curr < *count) + try_to_extend_reservation(my_rsv, sb, + *count - curr); + } if ((my_rsv->rsv_start > group_last_block) || (my_rsv->rsv_end < group_first_block)) { @@ -1525,10 +1529,8 @@ retry_alloc: if (group_no >= ngroups) group_no = 0; gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); - if (!gdp) { - *errp = -EIO; - goto out; - } + if (!gdp) + goto io_error; free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * skip this group if the number of @@ -1562,6 +1564,7 @@ retry_alloc: */ if (my_rsv) { my_rsv = NULL; + windowsz = 0; group_no = goal_group; goto retry_alloc; } diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index f8595787a70e..f2ed3e7fb9f5 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -153,6 +153,9 @@ static int ext4_readdir(struct file * filp, ext4_error (sb, "ext4_readdir", "directory #%lu contains a hole at offset %lu", inode->i_ino, (unsigned long)filp->f_pos); + /* corrupt size? 
Maybe no more blocks to read */ + if (filp->f_pos > inode->i_blocks << 9) + break; filp->f_pos += sb->s_blocksize - offset; continue; } diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c new file mode 100644 index 000000000000..d6afe4e27340 --- /dev/null +++ b/fs/ext4/ext4_jbd2.c @@ -0,0 +1,59 @@ +/* + * Interface between ext4 and JBD + */ + +#include <linux/ext4_jbd2.h> + +int __ext4_journal_get_undo_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = jbd2_journal_get_undo_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext4_journal_get_write_access(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = jbd2_journal_get_write_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext4_journal_forget(const char *where, handle_t *handle, + struct buffer_head *bh) +{ + int err = jbd2_journal_forget(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext4_journal_revoke(const char *where, handle_t *handle, + ext4_fsblk_t blocknr, struct buffer_head *bh) +{ + int err = jbd2_journal_revoke(handle, blocknr, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext4_journal_get_create_access(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = jbd2_journal_get_create_access(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} + +int __ext4_journal_dirty_metadata(const char *where, + handle_t *handle, struct buffer_head *bh) +{ + int err = jbd2_journal_dirty_metadata(handle, bh); + if (err) + ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err); + return err; +} diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 2608dce18f3e..dc2724fa7622 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -48,7 +48,7 @@ * ext_pblock: * combine low and high parts of physical block number into ext4_fsblk_t */ -static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex) +static ext4_fsblk_t ext_pblock(struct ext4_extent *ex) { ext4_fsblk_t block; @@ -61,7 +61,7 @@ static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex) * idx_pblock: * combine low and high parts of a leaf physical block number into ext4_fsblk_t */ -static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) +static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) { ext4_fsblk_t block; @@ -75,7 +75,7 @@ static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) * stores a large physical block number into an extent struct, * breaking it into parts */ -static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) +static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) { ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); @@ -86,7 +86,7 @@ static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb * stores a large physical block number into an index struct, * breaking it into parts */ -static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) +static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) { ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff)); ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 
1) & 0xffff); @@ -186,7 +186,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, depth = path->p_depth; /* try to predict block placement */ - if ((ex = path[depth].p_ext)) + ex = path[depth].p_ext; + if (ex) return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block)); /* it looks like index is empty; @@ -215,7 +216,7 @@ ext4_ext_new_block(handle_t *handle, struct inode *inode, return newblock; } -static inline int ext4_ext_space_block(struct inode *inode) +static int ext4_ext_space_block(struct inode *inode) { int size; @@ -228,7 +229,7 @@ static inline int ext4_ext_space_block(struct inode *inode) return size; } -static inline int ext4_ext_space_block_idx(struct inode *inode) +static int ext4_ext_space_block_idx(struct inode *inode) { int size; @@ -241,7 +242,7 @@ static inline int ext4_ext_space_block_idx(struct inode *inode) return size; } -static inline int ext4_ext_space_root(struct inode *inode) +static int ext4_ext_space_root(struct inode *inode) { int size; @@ -255,7 +256,7 @@ static inline int ext4_ext_space_root(struct inode *inode) return size; } -static inline int ext4_ext_space_root_idx(struct inode *inode) +static int ext4_ext_space_root_idx(struct inode *inode) { int size; @@ -476,13 +477,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path) /* account possible depth increase */ if (!path) { - path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2), + path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); if (!path) return ERR_PTR(-ENOMEM); alloc = 1; } - memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1)); path[0].p_hdr = eh; /* walk through the tree */ @@ -543,7 +543,8 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, struct ext4_extent_idx *ix; int len, err; - if ((err = ext4_ext_get_access(handle, inode, curp))) + err = ext4_ext_get_access(handle, inode, curp); + if (err) return err; BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); @@ -641,10 +642,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, * We need this to handle errors and free blocks * upon them. 
*/ - ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); + ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); if (!ablocks) return -ENOMEM; - memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth); /* allocate all needed blocks */ ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); @@ -665,7 +665,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } lock_buffer(bh); - if ((err = ext4_journal_get_create_access(handle, bh))) + err = ext4_journal_get_create_access(handle, bh); + if (err) goto cleanup; neh = ext_block_hdr(bh); @@ -702,18 +703,21 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - if ((err = ext4_journal_dirty_metadata(handle, bh))) + err = ext4_journal_dirty_metadata(handle, bh); + if (err) goto cleanup; brelse(bh); bh = NULL; /* correct old leaf */ if (m) { - if ((err = ext4_ext_get_access(handle, inode, path + depth))) + err = ext4_ext_get_access(handle, inode, path + depth); + if (err) goto cleanup; path[depth].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m); - if ((err = ext4_ext_dirty(handle, inode, path + depth))) + err = ext4_ext_dirty(handle, inode, path + depth); + if (err) goto cleanup; } @@ -736,7 +740,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } lock_buffer(bh); - if ((err = ext4_journal_get_create_access(handle, bh))) + err = ext4_journal_get_create_access(handle, bh); + if (err) goto cleanup; neh = ext_block_hdr(bh); @@ -780,7 +785,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - if ((err = ext4_journal_dirty_metadata(handle, bh))) + err = ext4_journal_dirty_metadata(handle, bh); + if (err) goto cleanup; brelse(bh); bh = NULL; @@ -800,9 +806,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } /* insert new index */ - if (err) - goto cleanup; - err = ext4_ext_insert_index(handle, inode, path + at, le32_to_cpu(border), newblock); @@ -857,7 +860,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, } lock_buffer(bh); - if ((err = ext4_journal_get_create_access(handle, bh))) { + err = ext4_journal_get_create_access(handle, bh); + if (err) { unlock_buffer(bh); goto out; } @@ -877,11 +881,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, set_buffer_uptodate(bh); unlock_buffer(bh); - if ((err = ext4_journal_dirty_metadata(handle, bh))) + err = ext4_journal_dirty_metadata(handle, bh); + if (err) goto out; /* create index in new top-level index: num,max,pointer */ - if ((err = ext4_ext_get_access(handle, inode, curp))) + err = ext4_ext_get_access(handle, inode, curp); + if (err) goto out; curp->p_hdr->eh_magic = EXT4_EXT_MAGIC; @@ -1073,27 +1079,31 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, */ k = depth - 1; border = path[depth].p_ext->ee_block; - if ((err = ext4_ext_get_access(handle, inode, path + k))) + err = ext4_ext_get_access(handle, inode, path + k); + if (err) return err; path[k].p_idx->ei_block = border; - if ((err = ext4_ext_dirty(handle, inode, path + k))) + err = ext4_ext_dirty(handle, inode, path + k); + if (err) return err; while (k--) { /* change all left-side indexes */ if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) break; - if ((err = ext4_ext_get_access(handle, inode, path + k))) + err = ext4_ext_get_access(handle, inode, path + k); + if (err) break; path[k].p_idx->ei_block = border; - if ((err = ext4_ext_dirty(handle, inode, 
path + k))) + err = ext4_ext_dirty(handle, inode, path + k); + if (err) break; } return err; } -static int inline +static int ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2) { @@ -1145,7 +1155,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, le16_to_cpu(newext->ee_len), le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len), ext_pblock(ex)); - if ((err = ext4_ext_get_access(handle, inode, path + depth))) + err = ext4_ext_get_access(handle, inode, path + depth); + if (err) return err; ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len) + le16_to_cpu(newext->ee_len)); @@ -1195,7 +1206,8 @@ repeat: has_space: nearex = path[depth].p_ext; - if ((err = ext4_ext_get_access(handle, inode, path + depth))) + err = ext4_ext_get_access(handle, inode, path + depth); + if (err) goto cleanup; if (!nearex) { @@ -1383,7 +1395,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block, return err; } -static inline void +static void ext4_ext_put_in_cache(struct inode *inode, __u32 block, __u32 len, __u32 start, int type) { @@ -1401,7 +1413,7 @@ ext4_ext_put_in_cache(struct inode *inode, __u32 block, * calculate boundaries of the gap that the requested block fits into * and cache this gap */ -static inline void +static void ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, unsigned long block) { @@ -1442,7 +1454,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); } -static inline int +static int ext4_ext_in_cache(struct inode *inode, unsigned long block, struct ext4_extent *ex) { @@ -1489,10 +1501,12 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, path--; leaf = idx_pblock(path->p_idx); BUG_ON(path->p_hdr->eh_entries == 0); - if ((err = ext4_ext_get_access(handle, inode, path))) + err = ext4_ext_get_access(handle, inode, path); + if (err) return err; path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1); - if ((err = ext4_ext_dirty(handle, inode, path))) + err = ext4_ext_dirty(handle, inode, path); + if (err) return err; ext_debug("index is empty, remove it, free block %llu\n", leaf); bh = sb_find_get_block(inode->i_sb, leaf); @@ -1509,7 +1523,7 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, * the caller should calculate credits under truncate_mutex and * pass the actual path. 
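Reading the revised credit estimate in the hunk that follows, the overhead charged beyond the blocks already on the path appears to be one credit for the existing root, 2*depth for allocating intermediate indexes (bitmap plus group descriptor), 2*depth for the two blocks modified at each level, and one for the superblock. A hypothetical reading of that arithmetic, not part of the patch:

/* 4 * depth + 2 credits of overhead; e.g. depth 2 gives 10. */
static int insert_overhead(int depth)
{
        return 1 + (depth * 2) + (depth * 2) + 1;
}
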
*/ -int inline ext4_ext_calc_credits_for_insert(struct inode *inode, +int ext4_ext_calc_credits_for_insert(struct inode *inode, struct ext4_ext_path *path) { int depth, needed; @@ -1534,16 +1548,17 @@ int inline ext4_ext_calc_credits_for_insert(struct inode *inode, /* * tree can be full, so it would need to grow in depth: - * allocation + old root + new root + * we need one credit to modify old root, credits for + * new root will be added in split accounting */ - needed += 2 + 1 + 1; + needed += 1; /* * Index split can happen, we would need: * allocate intermediate indexes (bitmap + group) * + change two blocks at each level, but root (already included) */ - needed = (depth * 2) + (depth * 2); + needed += (depth * 2) + (depth * 2); /* any allocation modifies superblock */ needed += 1; @@ -1718,7 +1733,7 @@ out: * ext4_ext_more_to_rm: * returns 1 if current index has to be freed (even partial) */ -static int inline +static int ext4_ext_more_to_rm(struct ext4_ext_path *path) { BUG_ON(path->p_idx == NULL); @@ -1756,12 +1771,11 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start) * We start scanning from right side, freeing all the blocks * after i_size and walking into the tree depth-wise. */ - path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL); + path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; } - memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1)); path[0].p_hdr = ext_inode_hdr(inode); if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) { err = -EIO; @@ -1932,7 +1946,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, mutex_lock(&EXT4_I(inode)->truncate_mutex); /* check in cache */ - if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) { + goal = ext4_ext_in_cache(inode, iblock, &newex); + if (goal) { if (goal == EXT4_EXT_CACHE_GAP) { if (!create) { /* block isn't allocated yet and @@ -1971,7 +1986,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, */ BUG_ON(path[depth].p_ext == NULL && depth != 0); - if ((ex = path[depth].p_ext)) { + ex = path[depth].p_ext; + if (ex) { unsigned long ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext_pblock(ex); unsigned short ee_len = le16_to_cpu(ex->ee_len); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 0a60ec5a16db..1d85d4ec9598 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1147,37 +1147,102 @@ static int do_journal_get_write_access(handle_t *handle, return ext4_journal_get_write_access(handle, bh); } +/* + * The idea of this helper function is following: + * if prepare_write has allocated some blocks, but not all of them, the + * transaction must include the content of the newly allocated blocks. + * This content is expected to be set to zeroes by block_prepare_write(). 
+ * 2006/10/14 SAW + */ +static int ext4_prepare_failure(struct file *file, struct page *page, + unsigned from, unsigned to) +{ + struct address_space *mapping; + struct buffer_head *bh, *head, *next; + unsigned block_start, block_end; + unsigned blocksize; + int ret; + handle_t *handle = ext4_journal_current_handle(); + + mapping = page->mapping; + if (ext4_should_writeback_data(mapping->host)) { + /* optimization: no constraints about data */ +skip: + return ext4_journal_stop(handle); + } + + head = page_buffers(page); + blocksize = head->b_size; + for ( bh = head, block_start = 0; + bh != head || !block_start; + block_start = block_end, bh = next) + { + next = bh->b_this_page; + block_end = block_start + blocksize; + if (block_end <= from) + continue; + if (block_start >= to) { + block_start = to; + break; + } + if (!buffer_mapped(bh)) + /* prepare_write failed on this bh */ + break; + if (ext4_should_journal_data(mapping->host)) { + ret = do_journal_get_write_access(handle, bh); + if (ret) { + ext4_journal_stop(handle); + return ret; + } + } + /* + * block_start here becomes the first block where the current iteration + * of prepare_write failed. + */ + } + if (block_start <= from) + goto skip; + + /* commit allocated and zeroed buffers */ + return mapping->a_ops->commit_write(file, page, from, block_start); +} + static int ext4_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - int ret, needed_blocks = ext4_writepage_trans_blocks(inode); + int ret, ret2; + int needed_blocks = ext4_writepage_trans_blocks(inode); handle_t *handle; int retries = 0; retry: handle = ext4_journal_start(inode, needed_blocks); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } + if (IS_ERR(handle)) + return PTR_ERR(handle); if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) ret = nobh_prepare_write(page, from, to, ext4_get_block); else ret = block_prepare_write(page, from, to, ext4_get_block); if (ret) - goto prepare_write_failed; + goto failure; if (ext4_should_journal_data(inode)) { ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); + if (ret) + /* fatal error, just put the handle and return */ + journal_stop(handle); } -prepare_write_failed: - if (ret) - ext4_journal_stop(handle); + return ret; + +failure: + ret2 = ext4_prepare_failure(file, page, from, to); + if (ret2 < 0) + return ret2; if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; -out: + /* retry number exceeded, or other error like -EDQUOT */ return ret; } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 8b1bd03d20f5..859990eac504 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de)) { + if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, + (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + +((char *)de - bh->b_data))) { + /* On error, skip the f_pos to the next block. 
*/ + dir_file->f_pos = (dir_file->f_pos | + (dir->i_sb->s_blocksize - 1)) + 1; + brelse (bh); + return count; + } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b4b022aa2bc2..486a641ca71b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb) return; } -static kmem_cache_t *ext4_inode_cachep; +static struct kmem_cache *ext4_inode_cachep; /* * Called inside transaction, so use GFP_NOFS @@ -495,7 +495,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; - ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS); + ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL @@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode) kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; @@ -1321,6 +1321,12 @@ static void ext4_orphan_cleanup (struct super_block * sb, return; } + if (bdev_read_only(sb->s_bdev)) { + printk(KERN_ERR "EXT4-fs: write access " + "unavailable, skipping orphan cleanup.\n"); + return; + } + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " @@ -2460,6 +2466,7 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf) struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead; int i; + u64 fsid; if (test_opt (sb, MINIX_DF)) overhead = 0; @@ -2506,6 +2513,10 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; + fsid = le64_to_cpup((void *)es->s_uuid) ^ + le64_to_cpup((void *)es->s_uuid + sizeof(u64)); + buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; + buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 63233cd946a7..dc969c357aa1 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -459,14 +459,11 @@ static void ext4_xattr_update_super_block(handle_t *handle, if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR)) return; - lock_super(sb); if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { - EXT4_SB(sb)->s_es->s_feature_compat |= - cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR); + EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR); sb->s_dirt = 1; ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); } - unlock_super(sb); } /* diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 82cc4f59e3ba..05c2941c74f2 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -34,9 +34,9 @@ static inline int fat_max_cache(struct inode *inode) return FAT_MAX_CACHE; } -static kmem_cache_t *fat_cache_cachep; +static struct kmem_cache *fat_cache_cachep; -static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct fat_cache *cache = (struct fat_cache *)foo; @@ -63,7 +63,7 @@ void fat_cache_destroy(void) static inline struct fat_cache *fat_cache_alloc(struct inode *inode) { - return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL); + return 
kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL); } static inline void fat_cache_free(struct fat_cache *cache) diff --git a/fs/fat/file.c b/fs/fat/file.c index 8337451e7897..0aa813d944a6 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -303,7 +303,17 @@ void fat_truncate(struct inode *inode) fat_flush_inodes(inode->i_sb, inode, NULL); } +int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + generic_fillattr(inode, stat); + stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size; + return 0; +} +EXPORT_SYMBOL_GPL(fat_getattr); + struct inode_operations fat_file_inode_operations = { .truncate = fat_truncate, .setattr = fat_notify_change, + .getattr = fat_getattr, }; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 78945b53b0f8..a9e4688582a2 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -477,12 +477,12 @@ static void fat_put_super(struct super_block *sb) kfree(sbi); } -static kmem_cache_t *fat_inode_cachep; +static struct kmem_cache *fat_inode_cachep; static struct inode *fat_alloc_inode(struct super_block *sb) { struct msdos_inode_info *ei; - ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL); + ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -493,7 +493,7 @@ static void fat_destroy_inode(struct inode *inode) kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; diff --git a/fs/fcntl.c b/fs/fcntl.c index e4f26165f12a..4740d35e52cd 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -553,7 +553,7 @@ int send_sigurg(struct fown_struct *fown) } static DEFINE_RWLOCK(fasync_lock); -static kmem_cache_t *fasync_cache __read_mostly; +static struct kmem_cache *fasync_cache __read_mostly; /* * fasync_helper() is used by some character device drivers (mainly mice) @@ -567,7 +567,7 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap int result = 0; if (on) { - new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL); + new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); if (!new) return -ENOMEM; } diff --git a/fs/file.c b/fs/file.c index 8e81775c5dc8..51aef675470f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -21,7 +21,6 @@ struct fdtable_defer { spinlock_t lock; struct work_struct wq; - struct timer_list timer; struct fdtable *next; }; @@ -75,24 +74,10 @@ static void __free_fdtable(struct fdtable *fdt) kfree(fdt); } -static void fdtable_timer(unsigned long data) -{ - struct fdtable_defer *fddef = (struct fdtable_defer *)data; - - spin_lock(&fddef->lock); - /* - * If someone already emptied the queue return. - */ - if (!fddef->next) - goto out; - if (!schedule_work(&fddef->wq)) - mod_timer(&fddef->timer, 5); -out: - spin_unlock(&fddef->lock); -} - -static void free_fdtable_work(struct fdtable_defer *f) +static void free_fdtable_work(struct work_struct *work) { + struct fdtable_defer *f = + container_of(work, struct fdtable_defer, wq); struct fdtable *fdt; spin_lock_bh(&f->lock); @@ -142,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; - /* - * vmallocs are handled from the workqueue context. - * If the per-cpu workqueue is running, then we - * defer work scheduling through a timer. 
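The fs/file.c hunk shown here drops the timer fallback entirely: with the reworked workqueue API the handler receives the work_struct itself and recovers its container via container_of, so the deferred fdtable work can simply be scheduled. A sketch of the pattern with hypothetical stand-in names:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Simplified stand-in for fdtable_defer. */
struct defer_list {
        spinlock_t lock;
        struct work_struct wq;
        void *pending;
};

static void defer_work(struct work_struct *work)
{
        struct defer_list *d = container_of(work, struct defer_list, wq);

        spin_lock_bh(&d->lock);
        /* drain d->pending here */
        d->pending = NULL;
        spin_unlock_bh(&d->lock);
}

static void defer_init(struct defer_list *d)
{
        spin_lock_init(&d->lock);
        INIT_WORK(&d->wq, defer_work);   /* no third "data" argument any more */
        d->pending = NULL;
}
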
- */ - if (!schedule_work(&fddef->wq)) - mod_timer(&fddef->timer, 5); + /* vmallocs are handled from the workqueue context */ + schedule_work(&fddef->wq); spin_unlock(&fddef->lock); put_cpu_var(fdtable_defer_list); } @@ -351,10 +331,7 @@ static void __devinit fdtable_defer_list_init(int cpu) { struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); spin_lock_init(&fddef->lock); - INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); - init_timer(&fddef->timer); - fddef->timer.data = (unsigned long)fddef; - fddef->timer.function = fdtable_timer; + INIT_WORK(&fddef->wq, free_fdtable_work); fddef->next = NULL; } diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index 4786d51ad3bd..0b7ae897cb78 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c @@ -46,7 +46,7 @@ extern const struct address_space_operations vxfs_immed_aops; extern struct inode_operations vxfs_immed_symlink_iops; -kmem_cache_t *vxfs_inode_cachep; +struct kmem_cache *vxfs_inode_cachep; #ifdef DIAGNOSTIC @@ -103,7 +103,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino) struct vxfs_inode_info *vip; struct vxfs_dinode *dip; - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) goto fail; dip = (struct vxfs_dinode *)(bp->b_data + offset); memcpy(vip, dip, sizeof(*vip)); @@ -145,7 +145,7 @@ __vxfs_iget(ino_t ino, struct inode *ilistp) struct vxfs_dinode *dip; caddr_t kaddr = (char *)page_address(pp); - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) goto fail; dip = (struct vxfs_dinode *)(kaddr + offset); memcpy(vip, dip, sizeof(*vip)); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 66571eafbb1e..357764d85ff1 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -19,7 +19,7 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); -static kmem_cache_t *fuse_req_cachep; +static struct kmem_cache *fuse_req_cachep; static struct fuse_conn *fuse_get_conn(struct file *file) { @@ -41,7 +41,7 @@ static void fuse_request_init(struct fuse_req *req) struct fuse_req *fuse_request_alloc(void) { - struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); + struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); if (req) fuse_request_init(req); return req; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index cfc8f81e60d0..1cabdb229adb 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -138,11 +138,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) struct fuse_entry_out outarg; struct fuse_conn *fc; struct fuse_req *req; + struct fuse_req *forget_req; struct dentry *parent; - /* Doesn't hurt to "reset" the validity timeout */ - fuse_invalidate_entry_cache(entry); - /* For negative dentries, always do a fresh lookup */ if (!inode) return 0; @@ -152,25 +150,33 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (IS_ERR(req)) return 0; + forget_req = fuse_get_req(fc); + if (IS_ERR(forget_req)) { + fuse_put_request(fc, req); + return 0; + } + parent = dget_parent(entry); fuse_lookup_init(req, parent->d_inode, entry, &outarg); request_send(fc, req); dput(parent); err = req->out.h.error; + fuse_put_request(fc, req); /* Zero nodeid is same as -ENOENT */ if (!err && !outarg.nodeid) err = -ENOENT; if (!err) { struct fuse_inode *fi = get_fuse_inode(inode); if (outarg.nodeid != get_node_id(inode)) { - fuse_send_forget(fc, req, outarg.nodeid, 1); + fuse_send_forget(fc, 
forget_req, + outarg.nodeid, 1); return 0; } spin_lock(&fc->lock); fi->nlookup ++; spin_unlock(&fc->lock); } - fuse_put_request(fc, req); + fuse_put_request(fc, forget_req); if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT) return 0; @@ -221,6 +227,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, struct inode *inode = NULL; struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_req *req; + struct fuse_req *forget_req; if (entry->d_name.len > FUSE_NAME_MAX) return ERR_PTR(-ENAMETOOLONG); @@ -229,9 +236,16 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, if (IS_ERR(req)) return ERR_PTR(PTR_ERR(req)); + forget_req = fuse_get_req(fc); + if (IS_ERR(forget_req)) { + fuse_put_request(fc, req); + return ERR_PTR(PTR_ERR(forget_req)); + } + fuse_lookup_init(req, dir, entry, &outarg); request_send(fc, req); err = req->out.h.error; + fuse_put_request(fc, req); /* Zero nodeid is same as -ENOENT, but with valid timeout */ if (!err && outarg.nodeid && (invalid_nodeid(outarg.nodeid) || !valid_mode(outarg.attr.mode))) @@ -240,11 +254,11 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); if (!inode) { - fuse_send_forget(fc, req, outarg.nodeid, 1); + fuse_send_forget(fc, forget_req, outarg.nodeid, 1); return ERR_PTR(-ENOMEM); } } - fuse_put_request(fc, req); + fuse_put_request(fc, forget_req); if (err && err != -ENOENT) return ERR_PTR(err); @@ -388,6 +402,13 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, struct fuse_entry_out outarg; struct inode *inode; int err; + struct fuse_req *forget_req; + + forget_req = fuse_get_req(fc); + if (IS_ERR(forget_req)) { + fuse_put_request(fc, req); + return PTR_ERR(forget_req); + } req->in.h.nodeid = get_node_id(dir); req->out.numargs = 1; @@ -395,24 +416,24 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, req->out.args[0].value = &outarg; request_send(fc, req); err = req->out.h.error; - if (err) { - fuse_put_request(fc, req); - return err; - } + fuse_put_request(fc, req); + if (err) + goto out_put_forget_req; + err = -EIO; if (invalid_nodeid(outarg.nodeid)) - goto out_put_request; + goto out_put_forget_req; if ((outarg.attr.mode ^ mode) & S_IFMT) - goto out_put_request; + goto out_put_forget_req; inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); if (!inode) { - fuse_send_forget(fc, req, outarg.nodeid, 1); + fuse_send_forget(fc, forget_req, outarg.nodeid, 1); return -ENOMEM; } - fuse_put_request(fc, req); + fuse_put_request(fc, forget_req); if (S_ISDIR(inode->i_mode)) { struct dentry *alias; @@ -434,8 +455,8 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, fuse_invalidate_attr(dir); return 0; - out_put_request: - fuse_put_request(fc, req); + out_put_forget_req: + fuse_put_request(fc, forget_req); return err; } @@ -1003,6 +1024,8 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) if (attr->ia_valid & ATTR_SIZE) { unsigned long limit; is_truncate = 1; + if (IS_SWAPFILE(inode)) + return -ETXTBSY; limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) { send_sig(SIGXFSZ, current, 0); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 763a50daf1c0..128f79c40803 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -754,6 +754,42 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) return err; } +static 
sector_t fuse_bmap(struct address_space *mapping, sector_t block) +{ + struct inode *inode = mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + struct fuse_bmap_in inarg; + struct fuse_bmap_out outarg; + int err; + + if (!inode->i_sb->s_bdev || fc->no_bmap) + return 0; + + req = fuse_get_req(fc); + if (IS_ERR(req)) + return 0; + + memset(&inarg, 0, sizeof(inarg)); + inarg.block = block; + inarg.blocksize = inode->i_sb->s_blocksize; + req->in.h.opcode = FUSE_BMAP; + req->in.h.nodeid = get_node_id(inode); + req->in.numargs = 1; + req->in.args[0].size = sizeof(inarg); + req->in.args[0].value = &inarg; + req->out.numargs = 1; + req->out.args[0].size = sizeof(outarg); + req->out.args[0].value = &outarg; + request_send(fc, req); + err = req->out.h.error; + fuse_put_request(fc, req); + if (err == -ENOSYS) + fc->no_bmap = 1; + + return err ? 0 : outarg.block; +} + static const struct file_operations fuse_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -787,6 +823,7 @@ static const struct address_space_operations fuse_file_aops = { .commit_write = fuse_commit_write, .readpages = fuse_readpages, .set_page_dirty = fuse_set_page_dirty, + .bmap = fuse_bmap, }; void fuse_init_file_inode(struct inode *inode) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 91edb8932d90..b98b20de7405 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -298,6 +298,9 @@ struct fuse_conn { reply, before any other request, and never cleared */ unsigned conn_error : 1; + /** Connection successful. Only set in INIT */ + unsigned conn_init : 1; + /** Do readpages asynchronously? Only set in INIT */ unsigned async_read : 1; @@ -339,6 +342,9 @@ struct fuse_conn { /** Is interrupt not implemented by fs? */ unsigned no_interrupt : 1; + /** Is bmap not implemented by fs? 
*/ + unsigned no_bmap : 1; + /** The number of requests waiting for completion */ atomic_t num_waiting; @@ -365,6 +371,9 @@ struct fuse_conn { /** Key for lock owner ID scrambling */ u32 scramble_key[4]; + + /** Reserved request for the DESTROY message */ + struct fuse_req *destroy_req; }; static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index fc4203570370..12450d2b320e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -22,7 +22,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); -static kmem_cache_t *fuse_inode_cachep; +static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); @@ -39,6 +39,7 @@ struct fuse_mount_data { unsigned group_id_present : 1; unsigned flags; unsigned max_read; + unsigned blksize; }; static struct inode *fuse_alloc_inode(struct super_block *sb) @@ -46,7 +47,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) struct inode *inode; struct fuse_inode *fi; - inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL); + inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); if (!inode) return NULL; @@ -205,10 +206,23 @@ static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags) fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb)); } +static void fuse_send_destroy(struct fuse_conn *fc) +{ + struct fuse_req *req = fc->destroy_req; + if (req && fc->conn_init) { + fc->destroy_req = NULL; + req->in.h.opcode = FUSE_DESTROY; + req->force = 1; + request_send(fc, req); + fuse_put_request(fc, req); + } +} + static void fuse_put_super(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); + fuse_send_destroy(fc); spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; @@ -274,6 +288,7 @@ enum { OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, + OPT_BLKSIZE, OPT_ERR }; @@ -285,14 +300,16 @@ static match_table_t tokens = { {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, {OPT_ALLOW_OTHER, "allow_other"}, {OPT_MAX_READ, "max_read=%u"}, + {OPT_BLKSIZE, "blksize=%u"}, {OPT_ERR, NULL} }; -static int parse_fuse_opt(char *opt, struct fuse_mount_data *d) +static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; memset(d, 0, sizeof(struct fuse_mount_data)); d->max_read = ~0; + d->blksize = 512; while ((p = strsep(&opt, ",")) != NULL) { int token; @@ -345,6 +362,12 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d) d->max_read = value; break; + case OPT_BLKSIZE: + if (!is_bdev || match_int(&args[0], &value)) + return 0; + d->blksize = value; + break; + default: return 0; } @@ -400,6 +423,8 @@ static struct fuse_conn *new_conn(void) void fuse_conn_put(struct fuse_conn *fc) { if (atomic_dec_and_test(&fc->count)) { + if (fc->destroy_req) + fuse_request_free(fc->destroy_req); mutex_destroy(&fc->inst_mutex); kfree(fc); } @@ -456,6 +481,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 
4096 : arg->max_write; + fc->conn_init = 1; } fuse_put_request(fc, req); fc->blocked = 0; @@ -500,15 +526,23 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) struct dentry *root_dentry; struct fuse_req *init_req; int err; + int is_bdev = sb->s_bdev != NULL; if (sb->s_flags & MS_MANDLOCK) return -EINVAL; - if (!parse_fuse_opt((char *) data, &d)) + if (!parse_fuse_opt((char *) data, &d, is_bdev)) return -EINVAL; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + if (is_bdev) { +#ifdef CONFIG_BLOCK + if (!sb_set_blocksize(sb, d.blksize)) + return -EINVAL; +#endif + } else { + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_maxbytes = MAX_LFS_FILESIZE; @@ -547,6 +581,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (!init_req) goto err_put_root; + if (is_bdev) { + fc->destroy_req = fuse_request_alloc(); + if (!fc->destroy_req) + goto err_put_root; + } + mutex_lock(&fuse_mutex); err = -EINVAL; if (file->private_data) @@ -598,10 +638,47 @@ static struct file_system_type fuse_fs_type = { .kill_sb = kill_anon_super, }; +#ifdef CONFIG_BLOCK +static int fuse_get_sb_blk(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *raw_data, struct vfsmount *mnt) +{ + return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super, + mnt); +} + +static struct file_system_type fuseblk_fs_type = { + .owner = THIS_MODULE, + .name = "fuseblk", + .get_sb = fuse_get_sb_blk, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; + +static inline int register_fuseblk(void) +{ + return register_filesystem(&fuseblk_fs_type); +} + +static inline void unregister_fuseblk(void) +{ + unregister_filesystem(&fuseblk_fs_type); +} +#else +static inline int register_fuseblk(void) +{ + return 0; +} + +static inline void unregister_fuseblk(void) +{ +} +#endif + static decl_subsys(fuse, NULL, NULL); static decl_subsys(connections, NULL, NULL); -static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, +static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct inode * inode = foo; @@ -617,24 +694,34 @@ static int __init fuse_fs_init(void) err = register_filesystem(&fuse_fs_type); if (err) - printk("fuse: failed to register filesystem\n"); - else { - fuse_inode_cachep = kmem_cache_create("fuse_inode", - sizeof(struct fuse_inode), - 0, SLAB_HWCACHE_ALIGN, - fuse_inode_init_once, NULL); - if (!fuse_inode_cachep) { - unregister_filesystem(&fuse_fs_type); - err = -ENOMEM; - } - } + goto out; + err = register_fuseblk(); + if (err) + goto out_unreg; + + fuse_inode_cachep = kmem_cache_create("fuse_inode", + sizeof(struct fuse_inode), + 0, SLAB_HWCACHE_ALIGN, + fuse_inode_init_once, NULL); + err = -ENOMEM; + if (!fuse_inode_cachep) + goto out_unreg2; + + return 0; + + out_unreg2: + unregister_fuseblk(); + out_unreg: + unregister_filesystem(&fuse_fs_type); + out: return err; } static void fuse_fs_cleanup(void) { unregister_filesystem(&fuse_fs_type); + unregister_fuseblk(); kmem_cache_destroy(fuse_inode_cachep); } diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index 8c27de8b9568..c0791cbacad9 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -2,6 +2,7 @@ config GFS2_FS tristate "GFS2 file system support" depends on EXPERIMENTAL select FS_POSIX_ACL + select CRC32 help A cluster filesystem. 
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 5f959b8ce406..6e80844367ee 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -74,11 +74,11 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access) { if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl) return -EOPNOTSUPP; - if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER)) + if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER)) return -EPERM; - if (S_ISLNK(ip->i_di.di_mode)) + if (S_ISLNK(ip->i_inode.i_mode)) return -EOPNOTSUPP; - if (!access && !S_ISDIR(ip->i_di.di_mode)) + if (!access && !S_ISDIR(ip->i_inode.i_mode)) return -EACCES; return 0; @@ -145,14 +145,14 @@ out: } /** - * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something + * gfs2_check_acl - Check an ACL to see if we're allowed to do something * @inode: the file we want to do something to * @mask: what we want to do * * Returns: errno */ -int gfs2_check_acl_locked(struct inode *inode, int mask) +int gfs2_check_acl(struct inode *inode, int mask) { struct posix_acl *acl = NULL; int error; @@ -170,21 +170,6 @@ int gfs2_check_acl_locked(struct inode *inode, int mask) return -EAGAIN; } -int gfs2_check_acl(struct inode *inode, int mask) -{ - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder i_gh; - int error; - - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); - if (!error) { - error = gfs2_check_acl_locked(inode, mask); - gfs2_glock_dq_uninit(&i_gh); - } - - return error; -} - static int munge_mode(struct gfs2_inode *ip, mode_t mode) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); @@ -198,10 +183,10 @@ static int munge_mode(struct gfs2_inode *ip, mode_t mode) error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_assert_withdraw(sdp, - (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT)); - ip->i_di.di_mode = mode; + (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); + ip->i_inode.i_mode = mode; gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -215,12 +200,12 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct posix_acl *acl = NULL, *clone; struct gfs2_ea_request er; - mode_t mode = ip->i_di.di_mode; + mode_t mode = ip->i_inode.i_mode; int error; if (!sdp->sd_args.ar_posix_acl) return 0; - if (S_ISLNK(ip->i_di.di_mode)) + if (S_ISLNK(ip->i_inode.i_mode)) return 0; memset(&er, 0, sizeof(struct gfs2_ea_request)); @@ -232,7 +217,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) return error; if (!acl) { mode &= ~current->fs->umask; - if (mode != ip->i_di.di_mode) + if (mode != ip->i_inode.i_mode) error = munge_mode(ip, mode); return error; } @@ -244,7 +229,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) posix_acl_release(acl); acl = clone; - if (S_ISDIR(ip->i_di.di_mode)) { + if (S_ISDIR(ip->i_inode.i_mode)) { er.er_name = GFS2_POSIX_ACL_DEFAULT; er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN; error = gfs2_system_eaops.eo_set(ip, &er); diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h index 05c294fe0d78..6751930bfb64 100644 --- a/fs/gfs2/acl.h +++ b/fs/gfs2/acl.h @@ -31,7 +31,6 @@ int gfs2_acl_validate_set(struct gfs2_inode *ip, int access, struct gfs2_ea_request *er, int *remove, mode_t *mode); int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access); -int gfs2_check_acl_locked(struct inode *inode, int mask); int gfs2_check_acl(struct inode *inode, int mask); int gfs2_acl_create(struct gfs2_inode 
*dip, struct gfs2_inode *ip); int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 06e9a8cb45e9..8240c1ff94f4 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -38,8 +38,8 @@ struct metapath { }; typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh, - struct buffer_head *bh, u64 *top, - u64 *bottom, unsigned int height, + struct buffer_head *bh, __be64 *top, + __be64 *bottom, unsigned int height, void *data); struct strip_mine { @@ -163,6 +163,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) if (ip->i_di.di_size) { *(__be64 *)(di + 1) = cpu_to_be64(block); ip->i_di.di_blocks++; + gfs2_set_inode_blocks(&ip->i_inode); di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); } @@ -230,7 +231,7 @@ static int build_height(struct inode *inode, unsigned height) struct buffer_head *blocks[GFS2_MAX_META_HEIGHT]; struct gfs2_dinode *di; int error; - u64 *bp; + __be64 *bp; u64 bn; unsigned n; @@ -255,7 +256,7 @@ static int build_height(struct inode *inode, unsigned height) GFS2_FORMAT_IN); gfs2_buffer_clear_tail(blocks[n], sizeof(struct gfs2_meta_header)); - bp = (u64 *)(blocks[n]->b_data + + bp = (__be64 *)(blocks[n]->b_data + sizeof(struct gfs2_meta_header)); *bp = cpu_to_be64(blocks[n+1]->b_blocknr); brelse(blocks[n]); @@ -272,6 +273,7 @@ static int build_height(struct inode *inode, unsigned height) *(__be64 *)(di + 1) = cpu_to_be64(bn); ip->i_di.di_height += new_height; ip->i_di.di_blocks += new_height; + gfs2_set_inode_blocks(&ip->i_inode); di->di_height = cpu_to_be16(ip->i_di.di_height); di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); brelse(dibh); @@ -360,15 +362,15 @@ static void find_metapath(struct gfs2_inode *ip, u64 block, * metadata tree. */ -static inline u64 *metapointer(struct buffer_head *bh, int *boundary, +static inline __be64 *metapointer(struct buffer_head *bh, int *boundary, unsigned int height, const struct metapath *mp) { unsigned int head_size = (height > 0) ? 
sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode); - u64 *ptr; + __be64 *ptr; *boundary = 0; - ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height]; - if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size)) + ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height]; + if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size)) *boundary = 1; return ptr; } @@ -394,7 +396,7 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh, int *new, u64 *block) { int boundary; - u64 *ptr = metapointer(bh, &boundary, height, mp); + __be64 *ptr = metapointer(bh, &boundary, height, mp); if (*ptr) { *block = be64_to_cpu(*ptr); @@ -415,17 +417,35 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh, *ptr = cpu_to_be64(*block); ip->i_di.di_blocks++; + gfs2_set_inode_blocks(&ip->i_inode); *new = 1; return 0; } +static inline void bmap_lock(struct inode *inode, int create) +{ + struct gfs2_inode *ip = GFS2_I(inode); + if (create) + down_write(&ip->i_rw_mutex); + else + down_read(&ip->i_rw_mutex); +} + +static inline void bmap_unlock(struct inode *inode, int create) +{ + struct gfs2_inode *ip = GFS2_I(inode); + if (create) + up_write(&ip->i_rw_mutex); + else + up_read(&ip->i_rw_mutex); +} + /** - * gfs2_block_pointers - Map a block from an inode to a disk block + * gfs2_block_map - Map a block from an inode to a disk block * @inode: The inode * @lblock: The logical block number - * @map_bh: The bh to be mapped - * @mp: metapath to use + * @bh_map: The bh to be mapped * * Find the block number on the current device which corresponds to an * inode's block. If the block had to be created, "new" will be set. @@ -433,8 +453,8 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh, * Returns: errno */ -static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, - struct buffer_head *bh_map, struct metapath *mp) +int gfs2_block_map(struct inode *inode, u64 lblock, int create, + struct buffer_head *bh_map) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); @@ -448,57 +468,61 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, u64 dblock = 0; int boundary; unsigned int maxlen = bh_map->b_size >> inode->i_blkbits; + struct metapath mp; + u64 size; BUG_ON(maxlen == 0); if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip))) return 0; + bmap_lock(inode, create); + clear_buffer_mapped(bh_map); + clear_buffer_new(bh_map); + clear_buffer_boundary(bh_map); bsize = gfs2_is_dir(ip) ? 
sdp->sd_jbsize : sdp->sd_sb.sb_bsize; - - height = calc_tree_height(ip, (lblock + 1) * bsize); - if (ip->i_di.di_height < height) { - if (!create) - return 0; - - error = build_height(inode, height); - if (error) - return error; + size = (lblock + 1) * bsize; + + if (size > ip->i_di.di_size) { + height = calc_tree_height(ip, size); + if (ip->i_di.di_height < height) { + if (!create) + goto out_ok; + + error = build_height(inode, height); + if (error) + goto out_fail; + } } - find_metapath(ip, lblock, mp); + find_metapath(ip, lblock, &mp); end_of_metadata = ip->i_di.di_height - 1; - error = gfs2_meta_inode_buffer(ip, &bh); if (error) - return error; + goto out_fail; for (x = 0; x < end_of_metadata; x++) { - lookup_block(ip, bh, x, mp, create, &new, &dblock); + lookup_block(ip, bh, x, &mp, create, &new, &dblock); brelse(bh); if (!dblock) - return 0; + goto out_ok; error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh); if (error) - return error; + goto out_fail; } - boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock); - clear_buffer_mapped(bh_map); - clear_buffer_new(bh_map); - clear_buffer_boundary(bh_map); - + boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock); if (dblock) { map_bh(bh_map, inode->i_sb, dblock); if (boundary) - set_buffer_boundary(bh); + set_buffer_boundary(bh_map); if (new) { struct buffer_head *dibh; error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } set_buffer_new(bh_map); @@ -507,8 +531,8 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, while(--maxlen && !buffer_boundary(bh_map)) { u64 eblock; - mp->mp_list[end_of_metadata]++; - boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock); + mp.mp_list[end_of_metadata]++; + boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock); if (eblock != ++dblock) break; bh_map->b_size += (1 << inode->i_blkbits); @@ -518,43 +542,15 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, } out_brelse: brelse(bh); - return 0; -} - - -static inline void bmap_lock(struct inode *inode, int create) -{ - struct gfs2_inode *ip = GFS2_I(inode); - if (create) - down_write(&ip->i_rw_mutex); - else - down_read(&ip->i_rw_mutex); -} - -static inline void bmap_unlock(struct inode *inode, int create) -{ - struct gfs2_inode *ip = GFS2_I(inode); - if (create) - up_write(&ip->i_rw_mutex); - else - up_read(&ip->i_rw_mutex); -} - -int gfs2_block_map(struct inode *inode, u64 lblock, int create, - struct buffer_head *bh) -{ - struct metapath mp; - int ret; - - bmap_lock(inode, create); - ret = gfs2_block_pointers(inode, lblock, create, bh, &mp); +out_ok: + error = 0; +out_fail: bmap_unlock(inode, create); - return ret; + return error; } int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) { - struct metapath mp; struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; int ret; int create = *new; @@ -564,9 +560,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi BUG_ON(!new); bh.b_size = 1 << (inode->i_blkbits + 5); - bmap_lock(inode, create); - ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp); - bmap_unlock(inode, create); + ret = gfs2_block_map(inode, lblock, create, &bh); *extlen = bh.b_size >> inode->i_blkbits; *dblock = bh.b_blocknr; if (buffer_new(&bh)) @@ -600,7 +594,7 @@ static int 
recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *bh = NULL; - u64 *top, *bottom; + __be64 *top, *bottom; u64 bn; int error; int mh_size = sizeof(struct gfs2_meta_header); @@ -611,17 +605,17 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, return error; dibh = bh; - top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; - bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; + top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; + bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; } else { error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); if (error) return error; - top = (u64 *)(bh->b_data + mh_size) + + top = (__be64 *)(bh->b_data + mh_size) + (first ? mp->mp_list[height] : 0); - bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; + bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; } error = bc(ip, dibh, bh, top, bottom, height, data); @@ -660,7 +654,7 @@ out: */ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, - struct buffer_head *bh, u64 *top, u64 *bottom, + struct buffer_head *bh, __be64 *top, __be64 *bottom, unsigned int height, void *data) { struct strip_mine *sm = data; @@ -668,7 +662,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, struct gfs2_rgrp_list rlist; u64 bn, bstart; u32 blen; - u64 *p; + __be64 *p; unsigned int rg_blocks = 0; int metadata; unsigned int revokes = 0; @@ -770,6 +764,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, if (!ip->i_di.di_blocks) gfs2_consist_inode(ip); ip->i_di.di_blocks--; + gfs2_set_inode_blocks(&ip->i_inode); } if (bstart) { if (metadata) @@ -778,9 +773,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, gfs2_free_data(ip, bstart, blen); } - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); up_write(&ip->i_rw_mutex); @@ -819,7 +814,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size) if (error) goto out; - error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); + error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); if (error) goto out_gunlock_q; @@ -853,14 +848,14 @@ static int do_grow(struct gfs2_inode *ip, u64 size) } ip->i_di.di_size = size; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); out_end_trans: @@ -968,9 +963,9 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) if (gfs2_is_stuffed(ip)) { ip->i_di.di_size = size; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); error = 1; @@ -980,10 +975,10 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) if (!error) { ip->i_di.di_size = size; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = 
ip->i_inode.i_ctime.tv_sec = get_seconds(); ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); } } @@ -1053,11 +1048,11 @@ static int trunc_end(struct gfs2_inode *ip) ip->i_num.no_addr; gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); } - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); out: @@ -1109,7 +1104,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size) { int error; - if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode))) + if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode))) return -EINVAL; if (size > ip->i_di.di_size) diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c index cab1f68d4685..683cb5bda870 100644 --- a/fs/gfs2/daemon.c +++ b/fs/gfs2/daemon.c @@ -112,6 +112,7 @@ int gfs2_logd(void *data) struct gfs2_sbd *sdp = data; struct gfs2_holder ji_gh; unsigned long t; + int need_flush; while (!kthread_should_stop()) { /* Advance the log tail */ @@ -120,8 +121,10 @@ int gfs2_logd(void *data) gfs2_tune_get(sdp, gt_log_flush_secs) * HZ; gfs2_ail1_empty(sdp, DIO_ALL); - - if (time_after_eq(jiffies, t)) { + gfs2_log_lock(sdp); + need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks); + gfs2_log_unlock(sdp); + if (need_flush || time_after_eq(jiffies, t)) { gfs2_log_flush(sdp, NULL); sdp->sd_log_flush_time = jiffies; } diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index e24af28b1a12..0fdcb7713cd9 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -131,8 +131,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf, memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); if (ip->i_di.di_size < offset + size) ip->i_di.di_size = offset + size; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); @@ -229,10 +229,10 @@ out: if (ip->i_di.di_size < offset + copied) ip->i_di.di_size = offset + copied; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); return copied; @@ -340,10 +340,15 @@ fail: return (copied) ? 
copied : error; } +static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent) +{ + return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0; +} + static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent, const struct qstr *name, int ret) { - if (dent->de_inum.no_addr != 0 && + if (!gfs2_dirent_sentinel(dent) && be32_to_cpu(dent->de_hash) == name->hash && be16_to_cpu(dent->de_name_len) == name->len && memcmp(dent+1, name->name, name->len) == 0) @@ -388,7 +393,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent, unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); unsigned totlen = be16_to_cpu(dent->de_rec_len); - if (!dent->de_inum.no_addr) + if (gfs2_dirent_sentinel(dent)) actual = GFS2_DIRENT_SIZE(0); if (totlen - actual >= required) return 1; @@ -405,7 +410,7 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent, void *opaque) { struct dirent_gather *g = opaque; - if (dent->de_inum.no_addr) { + if (!gfs2_dirent_sentinel(dent)) { g->pdent[g->offset++] = dent; } return 0; @@ -433,10 +438,10 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset, if (unlikely(offset + size > len)) goto error; msg = "zero inode number"; - if (unlikely(!first && !dent->de_inum.no_addr)) + if (unlikely(!first && gfs2_dirent_sentinel(dent))) goto error; msg = "name length is greater than space in dirent"; - if (dent->de_inum.no_addr && + if (!gfs2_dirent_sentinel(dent) && unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) > size)) goto error; @@ -598,7 +603,7 @@ static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh, return ret; /* Only the first dent could ever have de_inum.no_addr == 0 */ - if (!tmp->de_inum.no_addr) { + if (gfs2_dirent_sentinel(tmp)) { gfs2_consist_inode(dip); return -EIO; } @@ -621,7 +626,7 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh, { u16 cur_rec_len, prev_rec_len; - if (!cur->de_inum.no_addr) { + if (gfs2_dirent_sentinel(cur)) { gfs2_consist_inode(dip); return; } @@ -633,7 +638,8 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh, out the inode number and return. */ if (!prev) { - cur->de_inum.no_addr = 0; /* No endianess worries */ + cur->de_inum.no_addr = 0; + cur->de_inum.no_formal_ino = 0; return; } @@ -664,7 +670,7 @@ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode, struct gfs2_dirent *ndent; unsigned offset = 0, totlen; - if (dent->de_inum.no_addr) + if (!gfs2_dirent_sentinel(dent)) offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); totlen = be16_to_cpu(dent->de_rec_len); BUG_ON(offset + name->len > totlen); @@ -713,12 +719,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no, static int get_leaf_nr(struct gfs2_inode *dip, u32 index, u64 *leaf_out) { - u64 leaf_no; + __be64 leaf_no; int error; error = gfs2_dir_read_data(dip, (char *)&leaf_no, - index * sizeof(u64), - sizeof(u64), 0); + index * sizeof(__be64), + sizeof(__be64), 0); if (error != sizeof(u64)) return (error < 0) ? 
error : -EIO; @@ -837,7 +843,8 @@ static int dir_make_exhash(struct inode *inode) struct gfs2_leaf *leaf; int y; u32 x; - u64 *lp, bn; + __be64 *lp; + u64 bn; int error; error = gfs2_meta_inode_buffer(dip, &dibh); @@ -893,20 +900,20 @@ static int dir_make_exhash(struct inode *inode) gfs2_trans_add_bh(dip->i_gl, dibh, 1); gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); - lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); + lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); for (x = sdp->sd_hash_ptrs; x--; lp++) *lp = cpu_to_be64(bn); dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2; dip->i_di.di_blocks++; + gfs2_set_inode_blocks(&dip->i_inode); dip->i_di.di_flags |= GFS2_DIF_EXHASH; - dip->i_di.di_payload_format = 0; for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; dip->i_di.di_depth = y; - gfs2_dinode_out(&dip->i_di, dibh->b_data); + gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); @@ -929,7 +936,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) struct gfs2_leaf *nleaf, *oleaf; struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new; u32 start, len, half_len, divider; - u64 bn, *lp, leaf_no; + u64 bn, leaf_no; + __be64 *lp; u32 index; int x, moved = 0; int error; @@ -974,7 +982,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) /* Change the pointers. Don't bother distinguishing stuffed from non-stuffed. This code is complicated enough already. */ - lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL); + lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL); /* Change the pointers */ for (x = 0; x < half_len; x++) lp[x] = cpu_to_be64(bn); @@ -1000,7 +1008,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) if (dirent_next(dip, obh, &next)) next = NULL; - if (dent->de_inum.no_addr && + if (!gfs2_dirent_sentinel(dent) && be32_to_cpu(dent->de_hash) < divider) { struct qstr str; str.name = (char*)(dent+1); @@ -1037,7 +1045,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) error = gfs2_meta_inode_buffer(dip, &dibh); if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) { dip->i_di.di_blocks++; - gfs2_dinode_out(&dip->i_di, dibh->b_data); + gfs2_set_inode_blocks(&dip->i_inode); + gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); } @@ -1117,7 +1126,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) error = gfs2_meta_inode_buffer(dip, &dibh); if (!gfs2_assert_withdraw(sdp, !error)) { dip->i_di.di_depth++; - gfs2_dinode_out(&dip->i_di, dibh->b_data); + gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); } @@ -1194,7 +1203,7 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, int *copied) { const struct gfs2_dirent *dent, *dent_next; - struct gfs2_inum inum; + struct gfs2_inum_host inum; u64 off, off_next; unsigned int x, y; int run = 0; @@ -1341,7 +1350,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, u32 hsize, len = 0; u32 ht_offset, lp_offset, ht_offset_cur = -1; u32 hash, index; - u64 *lp; + __be64 *lp; int copied = 0; int error = 0; unsigned depth = 0; @@ -1365,7 +1374,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, if (ht_offset_cur != ht_offset) { error = gfs2_dir_read_data(dip, (char *)lp, - ht_offset * sizeof(u64), + ht_offset * sizeof(__be64), sdp->sd_hash_bsize, 1); if (error != sdp->sd_hash_bsize) { if (error >= 0) @@ -1456,7 +1465,7 @@ out: */ int gfs2_dir_search(struct inode *dir, const struct qstr *name, - struct gfs2_inum *inum, unsigned int 
*type) + struct gfs2_inum_host *inum, unsigned int *type) { struct buffer_head *bh; struct gfs2_dirent *dent; @@ -1515,7 +1524,8 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name) return error; gfs2_trans_add_bh(ip->i_gl, bh, 1); ip->i_di.di_blocks++; - gfs2_dinode_out(&ip->i_di, bh->b_data); + gfs2_set_inode_blocks(&ip->i_inode); + gfs2_dinode_out(ip, bh->b_data); brelse(bh); return 0; } @@ -1531,7 +1541,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name) */ int gfs2_dir_add(struct inode *inode, const struct qstr *name, - const struct gfs2_inum *inum, unsigned type) + const struct gfs2_inum_host *inum, unsigned type) { struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *bh; @@ -1558,8 +1568,8 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name, break; gfs2_trans_add_bh(ip->i_gl, bh, 1); ip->i_di.di_entries++; - ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); - gfs2_dinode_out(&ip->i_di, bh->b_data); + ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); + gfs2_dinode_out(ip, bh->b_data); brelse(bh); error = 0; break; @@ -1644,8 +1654,8 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name) gfs2_consist_inode(dip); gfs2_trans_add_bh(dip->i_gl, bh, 1); dip->i_di.di_entries--; - dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); - gfs2_dinode_out(&dip->i_di, bh->b_data); + dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); + gfs2_dinode_out(dip, bh->b_data); brelse(bh); mark_inode_dirty(&dip->i_inode); @@ -1666,7 +1676,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name) */ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, - struct gfs2_inum *inum, unsigned int new_type) + struct gfs2_inum_host *inum, unsigned int new_type) { struct buffer_head *bh; struct gfs2_dirent *dent; @@ -1692,8 +1702,8 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, gfs2_trans_add_bh(dip->i_gl, bh, 1); } - dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); - gfs2_dinode_out(&dip->i_di, bh->b_data); + dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); + gfs2_dinode_out(dip, bh->b_data); brelse(bh); return 0; } @@ -1715,7 +1725,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data) u32 hsize, len; u32 ht_offset, lp_offset, ht_offset_cur = -1; u32 index = 0; - u64 *lp; + __be64 *lp; u64 leaf_no; int error = 0; @@ -1735,7 +1745,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data) if (ht_offset_cur != ht_offset) { error = gfs2_dir_read_data(dip, (char *)lp, - ht_offset * sizeof(u64), + ht_offset * sizeof(__be64), sdp->sd_hash_bsize, 1); if (error != sdp->sd_hash_bsize) { if (error >= 0) @@ -1859,6 +1869,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, if (!dip->i_di.di_blocks) gfs2_consist_inode(dip); dip->i_di.di_blocks--; + gfs2_set_inode_blocks(&dip->i_inode); } error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size); @@ -1873,7 +1884,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, goto out_end_trans; gfs2_trans_add_bh(dip->i_gl, dibh, 1); - gfs2_dinode_out(&dip->i_di, dibh->b_data); + gfs2_dinode_out(dip, dibh->b_data); brelse(dibh); out_end_trans: diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h index 371233419b07..b21b33668a5b 100644 --- a/fs/gfs2/dir.h +++ b/fs/gfs2/dir.h @@ -31,17 +31,17 @@ struct gfs2_inum; typedef int (*gfs2_filldir_t) (void *opaque, const char *name, unsigned int 
length, u64 offset, - struct gfs2_inum *inum, unsigned int type); + struct gfs2_inum_host *inum, unsigned int type); int gfs2_dir_search(struct inode *dir, const struct qstr *filename, - struct gfs2_inum *inum, unsigned int *type); + struct gfs2_inum_host *inum, unsigned int *type); int gfs2_dir_add(struct inode *inode, const struct qstr *filename, - const struct gfs2_inum *inum, unsigned int type); + const struct gfs2_inum_host *inum, unsigned int type); int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, gfs2_filldir_t filldir); int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, - struct gfs2_inum *new_inum, unsigned int new_type); + struct gfs2_inum_host *new_inum, unsigned int new_type); int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c index 92c54e9b0dc3..cd747c00f670 100644 --- a/fs/gfs2/eaops.c +++ b/fs/gfs2/eaops.c @@ -120,7 +120,7 @@ static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er) if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) { if (!(er->er_flags & GFS2_ERF_MODE)) { - er->er_mode = ip->i_di.di_mode; + er->er_mode = ip->i_inode.i_mode; er->er_flags |= GFS2_ERF_MODE; } error = gfs2_acl_validate_set(ip, 1, er, diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c index a65a4ccfd4dd..ebebbdcd7057 100644 --- a/fs/gfs2/eattr.c +++ b/fs/gfs2/eattr.c @@ -112,7 +112,7 @@ fail: static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) { struct buffer_head *bh, *eabh; - u64 *eablk, *end; + __be64 *eablk, *end; int error; error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh); @@ -129,7 +129,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) goto out; } - eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); + eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; for (; eablk < end; eablk++) { @@ -224,7 +224,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, struct gfs2_rgrpd *rgd; struct gfs2_holder rg_gh; struct buffer_head *dibh; - u64 *dataptrs, bn = 0; + __be64 *dataptrs; + u64 bn = 0; u64 bstart = 0; unsigned int blen = 0; unsigned int blks = 0; @@ -280,6 +281,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, if (!ip->i_di.di_blocks) gfs2_consist_inode(ip); ip->i_di.di_blocks--; + gfs2_set_inode_blocks(&ip->i_inode); } if (bstart) gfs2_free_meta(ip, bstart, blen); @@ -299,9 +301,9 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { - ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -444,7 +446,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, struct buffer_head **bh; unsigned int amount = GFS2_EA_DATA_LEN(ea); unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); - u64 *dataptrs = GFS2_EA2DATAPTRS(ea); + __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); unsigned int x; int error = 0; @@ -597,6 +599,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp) ea->ea_num_ptrs = 0; ip->i_di.di_blocks++; + gfs2_set_inode_blocks(&ip->i_inode); return 0; } @@ -629,7 +632,7 @@ static int ea_write(struct gfs2_inode *ip, 
struct gfs2_ea_header *ea, ea->ea_num_ptrs = 0; memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); } else { - u64 *dataptr = GFS2_EA2DATAPTRS(ea); + __be64 *dataptr = GFS2_EA2DATAPTRS(ea); const char *data = er->er_data; unsigned int data_len = er->er_data_len; unsigned int copy; @@ -648,6 +651,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea, gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); ip->i_di.di_blocks++; + gfs2_set_inode_blocks(&ip->i_inode); copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : data_len; @@ -686,7 +690,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (error) goto out; - error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); + error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); if (error) goto out_gunlock_q; @@ -710,13 +714,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, if (!error) { if (er->er_flags & GFS2_ERF_MODE) { gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), - (ip->i_di.di_mode & S_IFMT) == + (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT)); - ip->i_di.di_mode = er->er_mode; + ip->i_inode.i_mode = er->er_mode; } - ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -846,12 +850,12 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, if (er->er_flags & GFS2_ERF_MODE) { gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), - (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT)); - ip->i_di.di_mode = er->er_mode; + (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT)); + ip->i_inode.i_mode = er->er_mode; } - ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); out: gfs2_trans_end(GFS2_SB(&ip->i_inode)); @@ -931,12 +935,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *indbh, *newbh; - u64 *eablk; + __be64 *eablk; int error; int mh_size = sizeof(struct gfs2_meta_header); if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { - u64 *end; + __be64 *end; error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh); @@ -948,7 +952,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, goto out; } - eablk = (u64 *)(indbh->b_data + mh_size); + eablk = (__be64 *)(indbh->b_data + mh_size); end = eablk + sdp->sd_inptrs; for (; eablk < end; eablk++) @@ -971,11 +975,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); gfs2_buffer_clear_tail(indbh, mh_size); - eablk = (u64 *)(indbh->b_data + mh_size); + eablk = (__be64 *)(indbh->b_data + mh_size); *eablk = cpu_to_be64(ip->i_di.di_eattr); ip->i_di.di_eattr = blk; ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; ip->i_di.di_blocks++; + gfs2_set_inode_blocks(&ip->i_inode); eablk++; } @@ -1129,9 +1134,9 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { - ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, 
dibh->b_data); brelse(dibh); } @@ -1202,7 +1207,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip, struct buffer_head **bh; unsigned int amount = GFS2_EA_DATA_LEN(ea); unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); - u64 *dataptrs = GFS2_EA2DATAPTRS(ea); + __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); unsigned int x; int error; @@ -1284,9 +1289,8 @@ int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, if (!error) { error = inode_setattr(&ip->i_inode, attr); gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); - gfs2_inode_attr_out(ip); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -1300,7 +1304,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrp_list rlist; struct buffer_head *indbh, *dibh; - u64 *eablk, *end; + __be64 *eablk, *end; unsigned int rg_blocks = 0; u64 bstart = 0; unsigned int blen = 0; @@ -1319,7 +1323,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) goto out; } - eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); + eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); end = eablk + sdp->sd_inptrs; for (; eablk < end; eablk++) { @@ -1363,7 +1367,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) gfs2_trans_add_bh(ip->i_gl, indbh, 1); - eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); + eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); bstart = 0; blen = 0; @@ -1387,6 +1391,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) if (!ip->i_di.di_blocks) gfs2_consist_inode(ip); ip->i_di.di_blocks--; + gfs2_set_inode_blocks(&ip->i_inode); } if (bstart) gfs2_free_meta(ip, bstart, blen); @@ -1396,7 +1401,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -1441,11 +1446,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip) if (!ip->i_di.di_blocks) gfs2_consist_inode(ip); ip->i_di.di_blocks--; + gfs2_set_inode_blocks(&ip->i_inode); error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } diff --git a/fs/gfs2/eattr.h b/fs/gfs2/eattr.h index ffa65947d686..c82dbe01d713 100644 --- a/fs/gfs2/eattr.h +++ b/fs/gfs2/eattr.h @@ -19,7 +19,7 @@ struct iattr; #define GFS2_EA_SIZE(ea) \ ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \ ((GFS2_EA_IS_STUFFED(ea)) ? 
GFS2_EA_DATA_LEN(ea) : \ - (sizeof(u64) * (ea)->ea_num_ptrs)), 8) + (sizeof(__be64) * (ea)->ea_num_ptrs)), 8) #define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs) #define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST) @@ -29,13 +29,13 @@ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8) #define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \ - sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8) + sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8) #define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1)) #define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len) #define GFS2_EA2DATAPTRS(ea) \ -((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8))) +((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8))) #define GFS2_EA2NEXT(ea) \ ((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea))) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 78fe0fae23ff..438146904b58 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -35,7 +35,7 @@ struct greedy { struct gfs2_holder gr_gh; - struct work_struct gr_work; + struct delayed_work gr_work; }; struct gfs2_gl_hash_bucket { @@ -96,7 +96,7 @@ static inline rwlock_t *gl_lock_addr(unsigned int x) return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)]; } #else /* not SMP, so no spinlocks required */ -static inline rwlock_t *gl_lock_addr(x) +static inline rwlock_t *gl_lock_addr(unsigned int x) { return NULL; } @@ -769,7 +769,7 @@ restart: } else { spin_unlock(&gl->gl_spin); - new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL); + new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS); if (!new_gh) return; set_bit(HIF_DEMOTE, &new_gh->gh_iflags); @@ -785,21 +785,6 @@ out: gfs2_holder_put(new_gh); } -void gfs2_glock_inode_squish(struct inode *inode) -{ - struct gfs2_holder gh; - struct gfs2_glock *gl = GFS2_I(inode)->i_gl; - gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh); - set_bit(HIF_DEMOTE, &gh.gh_iflags); - spin_lock(&gl->gl_spin); - gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders)); - list_add_tail(&gh.gh_list, &gl->gl_waiters2); - run_queue(gl); - spin_unlock(&gl->gl_spin); - wait_for_completion(&gh.gh_wait); - gfs2_holder_uninit(&gh); -} - /** * state_change - record that the glock is now in a different state * @gl: the glock @@ -847,12 +832,12 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) { if (glops->go_inval) - glops->go_inval(gl, DIO_METADATA | DIO_DATA); + glops->go_inval(gl, DIO_METADATA); } else if (gl->gl_state == LM_ST_DEFERRED) { /* We might not want to do this here. Look at moving to the inode glops. 
*/ if (glops->go_inval) - glops->go_inval(gl, DIO_DATA); + glops->go_inval(gl, 0); } /* Deal with each possible exit condition */ @@ -954,7 +939,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags) gfs2_assert_warn(sdp, state != gl->gl_state); if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) - glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); + glops->go_sync(gl); gfs2_glock_hold(gl); gl->gl_req_bh = xmote_bh; @@ -995,7 +980,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret) state_change(gl, LM_ST_UNLOCKED); if (glops->go_inval) - glops->go_inval(gl, DIO_METADATA | DIO_DATA); + glops->go_inval(gl, DIO_METADATA); if (gh) { spin_lock(&gl->gl_spin); @@ -1041,7 +1026,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl) gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) - glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); + glops->go_sync(gl); gfs2_glock_hold(gl); gl->gl_req_bh = drop_bh; @@ -1244,9 +1229,6 @@ restart: clear_bit(GLF_PREFETCH, &gl->gl_flags); - if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP)) - dump_glock(gl); - return error; } @@ -1368,9 +1350,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, glops->go_xmote_th(gl, state, flags); } -static void greedy_work(void *data) +static void greedy_work(struct work_struct *work) { - struct greedy *gr = data; + struct greedy *gr = container_of(work, struct greedy, gr_work.work); struct gfs2_holder *gh = &gr->gr_gh; struct gfs2_glock *gl = gh->gh_gl; const struct gfs2_glock_operations *glops = gl->gl_ops; @@ -1422,7 +1404,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time) gfs2_holder_init(gl, 0, 0, gh); set_bit(HIF_GREEDY, &gh->gh_iflags); - INIT_WORK(&gr->gr_work, greedy_work, gr); + INIT_DELAYED_WORK(&gr->gr_work, greedy_work); set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); schedule_delayed_work(&gr->gr_work, time); @@ -1923,7 +1905,7 @@ out: static void scan_glock(struct gfs2_glock *gl) { - if (gl->gl_ops == &gfs2_inode_glops) + if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) return; if (gfs2_glmutex_trylock(gl)) { @@ -2078,7 +2060,7 @@ static int dump_inode(struct gfs2_inode *ip) printk(KERN_INFO " num = %llu %llu\n", (unsigned long long)ip->i_num.no_formal_ino, (unsigned long long)ip->i_num.no_addr); - printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode)); + printk(KERN_INFO " type = %u\n", IF2DT(ip->i_inode.i_mode)); printk(KERN_INFO " i_flags ="); for (x = 0; x < 32; x++) if (test_bit(x, &ip->i_flags)) diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 2b2a889ee2cc..fb39108fc05c 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -27,8 +27,6 @@ #define GL_ATIME 0x00000200 #define GL_NOCACHE 0x00000400 #define GL_NOCANCEL 0x00001000 -#define GL_AOP 0x00004000 -#define GL_DUMP 0x00008000 #define GLR_TRYFAILED 13 #define GLR_CANCELED 14 @@ -108,7 +106,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, unsigned int state, int flags); -void gfs2_glock_inode_squish(struct inode *inode); /** * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 41a6b6818a50..b068d10bcb6e 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -92,7 +92,7 @@ static void gfs2_pte_inval(struct gfs2_glock *gl) ip = gl->gl_object; inode = &ip->i_inode; - if (!ip || 
!S_ISREG(ip->i_di.di_mode)) + if (!ip || !S_ISREG(inode->i_mode)) return; if (!test_bit(GIF_PAGED, &ip->i_flags)) @@ -107,89 +107,20 @@ static void gfs2_pte_inval(struct gfs2_glock *gl) } /** - * gfs2_page_inval - Invalidate all pages associated with a glock - * @gl: the glock - * - */ - -static void gfs2_page_inval(struct gfs2_glock *gl) -{ - struct gfs2_inode *ip; - struct inode *inode; - - ip = gl->gl_object; - inode = &ip->i_inode; - if (!ip || !S_ISREG(ip->i_di.di_mode)) - return; - - truncate_inode_pages(inode->i_mapping, 0); - gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages); - clear_bit(GIF_PAGED, &ip->i_flags); -} - -/** - * gfs2_page_wait - Wait for writeback of data - * @gl: the glock - * - * Syncs data (not metadata) for a regular file. - * No-op for all other types. - */ - -static void gfs2_page_wait(struct gfs2_glock *gl) -{ - struct gfs2_inode *ip = gl->gl_object; - struct inode *inode = &ip->i_inode; - struct address_space *mapping = inode->i_mapping; - int error; - - if (!S_ISREG(ip->i_di.di_mode)) - return; - - error = filemap_fdatawait(mapping); - - /* Put back any errors cleared by filemap_fdatawait() - so they can be caught by someone who can pass them - up to user space. */ - - if (error == -ENOSPC) - set_bit(AS_ENOSPC, &mapping->flags); - else if (error) - set_bit(AS_EIO, &mapping->flags); - -} - -static void gfs2_page_writeback(struct gfs2_glock *gl) -{ - struct gfs2_inode *ip = gl->gl_object; - struct inode *inode = &ip->i_inode; - struct address_space *mapping = inode->i_mapping; - - if (!S_ISREG(ip->i_di.di_mode)) - return; - - filemap_fdatawrite(mapping); -} - -/** * meta_go_sync - sync out the metadata for this glock * @gl: the glock - * @flags: DIO_* * * Called when demoting or unlocking an EX glock. We must flush * to disk all dirty buffers/pages relating to this glock, and must not * not return to caller to demote/unlock the glock until I/O is complete. 
*/ -static void meta_go_sync(struct gfs2_glock *gl, int flags) +static void meta_go_sync(struct gfs2_glock *gl) { - if (!(flags & DIO_METADATA)) - return; - if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { gfs2_log_flush(gl->gl_sbd, gl); gfs2_meta_sync(gl); - if (flags & DIO_RELEASE) - gfs2_ail_empty_gl(gl); + gfs2_ail_empty_gl(gl); } } @@ -264,31 +195,31 @@ static void inode_go_drop_th(struct gfs2_glock *gl) /** * inode_go_sync - Sync the dirty data and/or metadata for an inode glock * @gl: the glock protecting the inode - * @flags: * */ -static void inode_go_sync(struct gfs2_glock *gl, int flags) +static void inode_go_sync(struct gfs2_glock *gl) { - int meta = (flags & DIO_METADATA); - int data = (flags & DIO_DATA); + struct gfs2_inode *ip = gl->gl_object; + + if (ip && !S_ISREG(ip->i_inode.i_mode)) + ip = NULL; if (test_bit(GLF_DIRTY, &gl->gl_flags)) { - if (meta && data) { - gfs2_page_writeback(gl); - gfs2_log_flush(gl->gl_sbd, gl); - gfs2_meta_sync(gl); - gfs2_page_wait(gl); - clear_bit(GLF_DIRTY, &gl->gl_flags); - } else if (meta) { - gfs2_log_flush(gl->gl_sbd, gl); - gfs2_meta_sync(gl); - } else if (data) { - gfs2_page_writeback(gl); - gfs2_page_wait(gl); + gfs2_log_flush(gl->gl_sbd, gl); + if (ip) + filemap_fdatawrite(ip->i_inode.i_mapping); + gfs2_meta_sync(gl); + if (ip) { + struct address_space *mapping = ip->i_inode.i_mapping; + int error = filemap_fdatawait(mapping); + if (error == -ENOSPC) + set_bit(AS_ENOSPC, &mapping->flags); + else if (error) + set_bit(AS_EIO, &mapping->flags); } - if (flags & DIO_RELEASE) - gfs2_ail_empty_gl(gl); + clear_bit(GLF_DIRTY, &gl->gl_flags); + gfs2_ail_empty_gl(gl); } } @@ -301,15 +232,20 @@ static void inode_go_sync(struct gfs2_glock *gl, int flags) static void inode_go_inval(struct gfs2_glock *gl, int flags) { + struct gfs2_inode *ip = gl->gl_object; int meta = (flags & DIO_METADATA); - int data = (flags & DIO_DATA); if (meta) { gfs2_meta_inval(gl); - gl->gl_vn++; + if (ip) + set_bit(GIF_INVALID, &ip->i_flags); + } + + if (ip && S_ISREG(ip->i_inode.i_mode)) { + truncate_inode_pages(ip->i_inode.i_mapping, 0); + gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages); + clear_bit(GIF_PAGED, &ip->i_flags); } - if (data) - gfs2_page_inval(gl); } /** @@ -351,11 +287,10 @@ static int inode_go_lock(struct gfs2_holder *gh) if (!ip) return 0; - if (ip->i_vn != gl->gl_vn) { + if (test_bit(GIF_INVALID, &ip->i_flags)) { error = gfs2_inode_refresh(ip); if (error) return error; - gfs2_inode_attr_in(ip); } if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && @@ -379,11 +314,8 @@ static void inode_go_unlock(struct gfs2_holder *gh) struct gfs2_glock *gl = gh->gh_gl; struct gfs2_inode *ip = gl->gl_object; - if (ip == NULL) - return; - if (test_bit(GLF_DIRTY, &gl->gl_flags)) - gfs2_inode_attr_in(ip); - gfs2_meta_cache_flush(ip); + if (ip) + gfs2_meta_cache_flush(ip); } /** @@ -491,13 +423,13 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl) struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; - struct gfs2_log_header head; + struct gfs2_log_header_host head; int error; if (gl->gl_state != LM_ST_UNLOCKED && test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode)); - j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (error) diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 118dc693d111..734421edae85 100644 --- 
a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -14,8 +14,6 @@ #define DIO_WAIT 0x00000010 #define DIO_METADATA 0x00000020 -#define DIO_DATA 0x00000040 -#define DIO_RELEASE 0x00000080 #define DIO_ALL 0x00000100 struct gfs2_log_operations; @@ -41,7 +39,7 @@ struct gfs2_log_operations { void (*lo_before_commit) (struct gfs2_sbd *sdp); void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); void (*lo_before_scan) (struct gfs2_jdesc *jd, - struct gfs2_log_header *head, int pass); + struct gfs2_log_header_host *head, int pass); int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass); @@ -67,8 +65,8 @@ struct gfs2_rgrpd { struct list_head rd_list_mru; struct list_head rd_recent; /* Recently used rgrps */ struct gfs2_glock *rd_gl; /* Glock for this rgrp */ - struct gfs2_rindex rd_ri; - struct gfs2_rgrp rd_rg; + struct gfs2_rindex_host rd_ri; + struct gfs2_rgrp_host rd_rg; u64 rd_rg_vn; struct gfs2_bitmap *rd_bits; unsigned int rd_bh_count; @@ -103,18 +101,17 @@ struct gfs2_bufdata { }; struct gfs2_glock_operations { - void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state, - int flags); - void (*go_xmote_bh) (struct gfs2_glock * gl); - void (*go_drop_th) (struct gfs2_glock * gl); - void (*go_drop_bh) (struct gfs2_glock * gl); - void (*go_sync) (struct gfs2_glock * gl, int flags); - void (*go_inval) (struct gfs2_glock * gl, int flags); - int (*go_demote_ok) (struct gfs2_glock * gl); - int (*go_lock) (struct gfs2_holder * gh); - void (*go_unlock) (struct gfs2_holder * gh); - void (*go_callback) (struct gfs2_glock * gl, unsigned int state); - void (*go_greedy) (struct gfs2_glock * gl); + void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags); + void (*go_xmote_bh) (struct gfs2_glock *gl); + void (*go_drop_th) (struct gfs2_glock *gl); + void (*go_drop_bh) (struct gfs2_glock *gl); + void (*go_sync) (struct gfs2_glock *gl); + void (*go_inval) (struct gfs2_glock *gl, int flags); + int (*go_demote_ok) (struct gfs2_glock *gl); + int (*go_lock) (struct gfs2_holder *gh); + void (*go_unlock) (struct gfs2_holder *gh); + void (*go_callback) (struct gfs2_glock *gl, unsigned int state); + void (*go_greedy) (struct gfs2_glock *gl); const int go_type; }; @@ -217,6 +214,7 @@ struct gfs2_alloc { }; enum { + GIF_INVALID = 0, GIF_QD_LOCKED = 1, GIF_PAGED = 2, GIF_SW_PAGED = 3, @@ -224,12 +222,11 @@ enum { struct gfs2_inode { struct inode i_inode; - struct gfs2_inum i_num; + struct gfs2_inum_host i_num; unsigned long i_flags; /* GIF_... */ - u64 i_vn; - struct gfs2_dinode i_di; /* To be replaced by ref to block */ + struct gfs2_dinode_host i_di; /* To be replaced by ref to block */ struct gfs2_glock *i_gl; /* Move into i_gh? */ struct gfs2_holder i_iopen_gh; @@ -450,7 +447,7 @@ struct gfs2_sbd { struct super_block *sd_vfs_meta; struct kobject sd_kobj; unsigned long sd_flags; /* SDF_... 
*/ - struct gfs2_sb sd_sb; + struct gfs2_sb_host sd_sb; /* Constants computed on mount */ @@ -503,8 +500,8 @@ struct gfs2_sbd { spinlock_t sd_statfs_spin; struct mutex sd_statfs_mutex; - struct gfs2_statfs_change sd_statfs_master; - struct gfs2_statfs_change sd_statfs_local; + struct gfs2_statfs_change_host sd_statfs_master; + struct gfs2_statfs_change_host sd_statfs_local; unsigned long sd_statfs_sync_time; /* Resource group stuff */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 57c43ac47925..d122074c45e1 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -38,83 +38,12 @@ #include "trans.h" #include "util.h" -/** - * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode - * @ip: The GFS2 inode (with embedded disk inode data) - * @inode: The Linux VFS inode - * - */ - -void gfs2_inode_attr_in(struct gfs2_inode *ip) -{ - struct inode *inode = &ip->i_inode; - struct gfs2_dinode *di = &ip->i_di; - - inode->i_ino = ip->i_num.no_addr; - - switch (di->di_mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - inode->i_rdev = MKDEV(di->di_major, di->di_minor); - break; - default: - inode->i_rdev = 0; - break; - }; - - inode->i_mode = di->di_mode; - inode->i_nlink = di->di_nlink; - inode->i_uid = di->di_uid; - inode->i_gid = di->di_gid; - i_size_write(inode, di->di_size); - inode->i_atime.tv_sec = di->di_atime; - inode->i_mtime.tv_sec = di->di_mtime; - inode->i_ctime.tv_sec = di->di_ctime; - inode->i_atime.tv_nsec = 0; - inode->i_mtime.tv_nsec = 0; - inode->i_ctime.tv_nsec = 0; - inode->i_blocks = di->di_blocks << - (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT); - - if (di->di_flags & GFS2_DIF_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - - if (di->di_flags & GFS2_DIF_APPENDONLY) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; -} - -/** - * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode - * @ip: The GFS2 inode - * - * Only copy out the attributes that we want the VFS layer - * to be able to modify. 
- */ - -void gfs2_inode_attr_out(struct gfs2_inode *ip) -{ - struct inode *inode = &ip->i_inode; - struct gfs2_dinode *di = &ip->i_di; - gfs2_assert_withdraw(GFS2_SB(inode), - (di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT)); - di->di_mode = inode->i_mode; - di->di_uid = inode->i_uid; - di->di_gid = inode->i_gid; - di->di_atime = inode->i_atime.tv_sec; - di->di_mtime = inode->i_mtime.tv_sec; - di->di_ctime = inode->i_ctime.tv_sec; -} - static int iget_test(struct inode *inode, void *opaque) { struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_inum *inum = opaque; + struct gfs2_inum_host *inum = opaque; - if (ip && ip->i_num.no_addr == inum->no_addr) + if (ip->i_num.no_addr == inum->no_addr) return 1; return 0; @@ -123,19 +52,20 @@ static int iget_test(struct inode *inode, void *opaque) static int iget_set(struct inode *inode, void *opaque) { struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_inum *inum = opaque; + struct gfs2_inum_host *inum = opaque; ip->i_num = *inum; + inode->i_ino = inum->no_addr; return 0; } -struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum) +struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum) { return ilookup5(sb, (unsigned long)inum->no_formal_ino, iget_test, inum); } -static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum) +static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum) { return iget5_locked(sb, (unsigned long)inum->no_formal_ino, iget_test, iget_set, inum); @@ -150,13 +80,16 @@ static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum) * Returns: A VFS inode, or an error */ -struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type) +struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type) { struct inode *inode = gfs2_iget(sb, inum); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_glock *io_gl; int error; + if (!inode) + return ERR_PTR(-ENOBUFS); + if (inode->i_state & I_NEW) { struct gfs2_sbd *sdp = GFS2_SB(inode); umode_t mode = DT2IF(type); @@ -185,7 +118,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, if (unlikely(error)) goto fail_put; - ip->i_vn = ip->i_gl->gl_vn - 1; + set_bit(GIF_INVALID, &ip->i_flags); error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); if (unlikely(error)) goto fail_iopen; @@ -205,6 +138,63 @@ fail: return ERR_PTR(error); } +static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) +{ + struct gfs2_dinode_host *di = &ip->i_di; + const struct gfs2_dinode *str = buf; + + if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) { + if (gfs2_consist_inode(ip)) + gfs2_dinode_print(ip); + return -EIO; + } + if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino)) + return -ESTALE; + + ip->i_inode.i_mode = be32_to_cpu(str->di_mode); + ip->i_inode.i_rdev = 0; + switch (ip->i_inode.i_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major), + be32_to_cpu(str->di_minor)); + break; + }; + + ip->i_inode.i_uid = be32_to_cpu(str->di_uid); + ip->i_inode.i_gid = be32_to_cpu(str->di_gid); + /* + * We will need to review setting the nlink count here in the + * light of the forthcoming ro bind mount work. This is a reminder + * to do that. 
+ */ + ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); + di->di_size = be64_to_cpu(str->di_size); + i_size_write(&ip->i_inode, di->di_size); + di->di_blocks = be64_to_cpu(str->di_blocks); + gfs2_set_inode_blocks(&ip->i_inode); + ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime); + ip->i_inode.i_atime.tv_nsec = 0; + ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); + ip->i_inode.i_mtime.tv_nsec = 0; + ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); + ip->i_inode.i_ctime.tv_nsec = 0; + + di->di_goal_meta = be64_to_cpu(str->di_goal_meta); + di->di_goal_data = be64_to_cpu(str->di_goal_data); + di->di_generation = be64_to_cpu(str->di_generation); + + di->di_flags = be32_to_cpu(str->di_flags); + gfs2_set_inode_flags(&ip->i_inode); + di->di_height = be16_to_cpu(str->di_height); + + di->di_depth = be16_to_cpu(str->di_depth); + di->di_entries = be32_to_cpu(str->di_entries); + + di->di_eattr = be64_to_cpu(str->di_eattr); + return 0; +} + /** * gfs2_inode_refresh - Refresh the incore copy of the dinode * @ip: The GFS2 inode @@ -226,21 +216,11 @@ int gfs2_inode_refresh(struct gfs2_inode *ip) return -EIO; } - gfs2_dinode_in(&ip->i_di, dibh->b_data); - + error = gfs2_dinode_in(ip, dibh->b_data); brelse(dibh); + clear_bit(GIF_INVALID, &ip->i_flags); - if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) { - if (gfs2_consist_inode(ip)) - gfs2_dinode_print(&ip->i_di); - return -EIO; - } - if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino) - return -ESTALE; - - ip->i_vn = ip->i_gl->gl_vn; - - return 0; + return error; } int gfs2_dinode_dealloc(struct gfs2_inode *ip) @@ -252,7 +232,7 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip) if (ip->i_di.di_blocks != 1) { if (gfs2_consist_inode(ip)) - gfs2_dinode_print(&ip->i_di); + gfs2_dinode_print(ip); return -EIO; } @@ -315,14 +295,14 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff) u32 nlink; int error; - BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink); - nlink = ip->i_di.di_nlink + diff; + BUG_ON(diff != 1 && diff != -1); + nlink = ip->i_inode.i_nlink + diff; /* If we are reducing the nlink count, but the new value ends up being bigger than the old one, we must have underflowed. 
*/ - if (diff < 0 && nlink > ip->i_di.di_nlink) { + if (diff < 0 && nlink > ip->i_inode.i_nlink) { if (gfs2_consist_inode(ip)) - gfs2_dinode_print(&ip->i_di); + gfs2_dinode_print(ip); return -EIO; } @@ -330,16 +310,19 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff) if (error) return error; - ip->i_di.di_nlink = nlink; - ip->i_di.di_ctime = get_seconds(); - ip->i_inode.i_nlink = nlink; + if (diff > 0) + inc_nlink(&ip->i_inode); + else + drop_nlink(&ip->i_inode); + + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); mark_inode_dirty(&ip->i_inode); - if (ip->i_di.di_nlink == 0) { + if (ip->i_inode.i_nlink == 0) { struct gfs2_rgrpd *rgd; struct gfs2_holder ri_gh, rg_gh; @@ -354,7 +337,6 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff) if (error) goto out_norgrp; - clear_nlink(&ip->i_inode); gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ gfs2_glock_dq_uninit(&rg_gh); out_norgrp: @@ -391,7 +373,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, struct super_block *sb = dir->i_sb; struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_holder d_gh; - struct gfs2_inum inum; + struct gfs2_inum_host inum; unsigned int type; int error = 0; struct inode *inode = NULL; @@ -433,7 +415,7 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino) { struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); struct buffer_head *bh; - struct gfs2_inum_range ir; + struct gfs2_inum_range_host ir; int error; error = gfs2_trans_begin(sdp, RES_DINODE, 0); @@ -476,7 +458,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino) struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode); struct gfs2_holder gh; struct buffer_head *bh; - struct gfs2_inum_range ir; + struct gfs2_inum_range_host ir; int error; error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); @@ -497,21 +479,22 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino) if (!ir.ir_length) { struct buffer_head *m_bh; u64 x, y; + __be64 z; error = gfs2_meta_inode_buffer(m_ip, &m_bh); if (error) goto out_brelse; - x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); - x = y = be64_to_cpu(x); + z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); + x = y = be64_to_cpu(z); ir.ir_start = x; ir.ir_length = GFS2_INUM_QUANTUM; x += GFS2_INUM_QUANTUM; if (x < y) gfs2_consist_inode(m_ip); - x = cpu_to_be64(x); + z = cpu_to_be64(x); gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); - *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x; + *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z; brelse(m_bh); } @@ -564,7 +547,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, return error; /* Don't create entries in an unlinked directory */ - if (!dip->i_di.di_nlink) + if (!dip->i_inode.i_nlink) return -EPERM; error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL); @@ -580,7 +563,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, if (dip->i_di.di_entries == (u32)-1) return -EFBIG; - if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1) + if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) return -EMLINK; return 0; @@ -590,24 +573,24 @@ static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode, unsigned int *uid, unsigned int *gid) { if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir && - (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) { + (dip->i_inode.i_mode & S_ISUID) && 
dip->i_inode.i_uid) { if (S_ISDIR(*mode)) *mode |= S_ISUID; - else if (dip->i_di.di_uid != current->fsuid) + else if (dip->i_inode.i_uid != current->fsuid) *mode &= ~07111; - *uid = dip->i_di.di_uid; + *uid = dip->i_inode.i_uid; } else *uid = current->fsuid; - if (dip->i_di.di_mode & S_ISGID) { + if (dip->i_inode.i_mode & S_ISGID) { if (S_ISDIR(*mode)) *mode |= S_ISGID; - *gid = dip->i_di.di_gid; + *gid = dip->i_inode.i_gid; } else *gid = current->fsgid; } -static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum, +static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum, u64 *generation) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); @@ -647,9 +630,9 @@ out: */ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, - const struct gfs2_inum *inum, unsigned int mode, + const struct gfs2_inum_host *inum, unsigned int mode, unsigned int uid, unsigned int gid, - const u64 *generation) + const u64 *generation, dev_t dev) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_dinode *di; @@ -666,14 +649,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, di->di_mode = cpu_to_be32(mode); di->di_uid = cpu_to_be32(uid); di->di_gid = cpu_to_be32(gid); - di->di_nlink = cpu_to_be32(0); - di->di_size = cpu_to_be64(0); + di->di_nlink = 0; + di->di_size = 0; di->di_blocks = cpu_to_be64(1); di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds()); - di->di_major = di->di_minor = cpu_to_be32(0); + di->di_major = cpu_to_be32(MAJOR(dev)); + di->di_minor = cpu_to_be32(MINOR(dev)); di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr); di->di_generation = cpu_to_be64(*generation); - di->di_flags = cpu_to_be32(0); + di->di_flags = 0; if (S_ISREG(mode)) { if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || @@ -690,22 +674,22 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, } di->__pad1 = 0; - di->di_payload_format = cpu_to_be32(0); - di->di_height = cpu_to_be32(0); + di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? 
GFS2_FORMAT_DE : 0); + di->di_height = 0; di->__pad2 = 0; di->__pad3 = 0; - di->di_depth = cpu_to_be16(0); - di->di_entries = cpu_to_be32(0); + di->di_depth = 0; + di->di_entries = 0; memset(&di->__pad4, 0, sizeof(di->__pad4)); - di->di_eattr = cpu_to_be64(0); + di->di_eattr = 0; memset(&di->di_reserved, 0, sizeof(di->di_reserved)); brelse(dibh); } static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, - unsigned int mode, const struct gfs2_inum *inum, - const u64 *generation) + unsigned int mode, const struct gfs2_inum_host *inum, + const u64 *generation, dev_t dev) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); unsigned int uid, gid; @@ -726,7 +710,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, if (error) goto out_quota; - init_dinode(dip, gl, inum, mode, uid, gid, generation); + init_dinode(dip, gl, inum, mode, uid, gid, generation, dev); gfs2_quota_change(dip, +1, uid, gid); gfs2_trans_end(sdp); @@ -756,8 +740,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, if (alloc_required < 0) goto fail; if (alloc_required) { - error = gfs2_quota_check(dip, dip->i_di.di_uid, - dip->i_di.di_gid); + error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid); if (error) goto fail_quota_locks; @@ -779,16 +762,16 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, goto fail_quota_locks; } - error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode)); + error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode)); if (error) goto fail_end_trans; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto fail_end_trans; - ip->i_di.di_nlink = 1; + ip->i_inode.i_nlink = 1; gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); return 0; @@ -857,13 +840,13 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip) */ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, - unsigned int mode) + unsigned int mode, dev_t dev) { struct inode *inode; struct gfs2_inode *dip = ghs->gh_gl->gl_object; struct inode *dir = &dip->i_inode; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); - struct gfs2_inum inum; + struct gfs2_inum_host inum; int error; u64 generation; @@ -887,35 +870,12 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, if (error) goto fail_gunlock; - if (inum.no_addr < dip->i_num.no_addr) { - gfs2_glock_dq(ghs); - - error = gfs2_glock_nq_num(sdp, inum.no_addr, - &gfs2_inode_glops, LM_ST_EXCLUSIVE, - GL_SKIP, ghs + 1); - if (error) { - return ERR_PTR(error); - } - - gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs); - error = gfs2_glock_nq(ghs); - if (error) { - gfs2_glock_dq_uninit(ghs + 1); - return ERR_PTR(error); - } - - error = create_ok(dip, name, mode); - if (error) - goto fail_gunlock2; - } else { - error = gfs2_glock_nq_num(sdp, inum.no_addr, - &gfs2_inode_glops, LM_ST_EXCLUSIVE, - GL_SKIP, ghs + 1); - if (error) - goto fail_gunlock; - } + error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops, + LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); + if (error) + goto fail_gunlock; - error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation); + error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev); if (error) goto fail_gunlock2; @@ -972,7 +932,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, if (ip->i_di.di_entries != 2) { if (gfs2_consist_inode(ip)) - 
gfs2_dinode_print(&ip->i_di); + gfs2_dinode_print(ip); return -EIO; } @@ -994,7 +954,12 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, if (error) return error; - error = gfs2_change_nlink(ip, -2); + /* It looks odd, but it really should be done twice */ + error = gfs2_change_nlink(ip, -1); + if (error) + return error; + + error = gfs2_change_nlink(ip, -1); if (error) return error; @@ -1015,16 +980,16 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip) { - struct gfs2_inum inum; + struct gfs2_inum_host inum; unsigned int type; int error; if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) return -EPERM; - if ((dip->i_di.di_mode & S_ISVTX) && - dip->i_di.di_uid != current->fsuid && - ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER)) + if ((dip->i_inode.i_mode & S_ISVTX) && + dip->i_inode.i_uid != current->fsuid && + ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER)) return -EPERM; if (IS_APPEND(&dip->i_inode)) @@ -1041,7 +1006,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, if (!gfs2_inum_equal(&inum, &ip->i_num)) return -ENOENT; - if (IF2DT(ip->i_di.di_mode) != type) { + if (IF2DT(ip->i_inode.i_mode) != type) { gfs2_consist_inode(dip); return -EIO; } @@ -1191,7 +1156,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh) return 0; curtime = get_seconds(); - if (curtime - ip->i_di.di_atime >= quantum) { + if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) { gfs2_glock_dq(gh); gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY, gh); @@ -1203,7 +1168,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh) trying to get exclusive lock. */ curtime = get_seconds(); - if (curtime - ip->i_di.di_atime >= quantum) { + if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) { struct buffer_head *dibh; struct gfs2_dinode *di; @@ -1217,11 +1182,11 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh) if (error) goto fail_end_trans; - ip->i_di.di_atime = curtime; + ip->i_inode.i_atime.tv_sec = curtime; gfs2_trans_add_bh(ip->i_gl, dibh, 1); di = (struct gfs2_dinode *)dibh->b_data; - di->di_atime = cpu_to_be64(ip->i_di.di_atime); + di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); brelse(dibh); gfs2_trans_end(sdp); @@ -1246,92 +1211,6 @@ fail: return error; } -/** - * glock_compare_atime - Compare two struct gfs2_glock structures for sort - * @arg_a: the first structure - * @arg_b: the second structure - * - * Returns: 1 if A > B - * -1 if A < B - * 0 if A == B - */ - -static int glock_compare_atime(const void *arg_a, const void *arg_b) -{ - const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; - const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; - const struct lm_lockname *a = &gh_a->gh_gl->gl_name; - const struct lm_lockname *b = &gh_b->gh_gl->gl_name; - - if (a->ln_number > b->ln_number) - return 1; - if (a->ln_number < b->ln_number) - return -1; - if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE) - return 1; - if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME)) - return 1; - - return 0; -} - -/** - * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an - * atime update - * @num_gh: the number of structures - * @ghs: an array of struct gfs2_holder structures - * - * Returns: 0 on success (all glocks acquired), - * errno on failure (no glocks acquired) - */ - -int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs) 
-{ - struct gfs2_holder **p; - unsigned int x; - int error = 0; - - if (!num_gh) - return 0; - - if (num_gh == 1) { - ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); - if (ghs->gh_flags & GL_ATIME) - error = gfs2_glock_nq_atime(ghs); - else - error = gfs2_glock_nq(ghs); - return error; - } - - p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL); - if (!p) - return -ENOMEM; - - for (x = 0; x < num_gh; x++) - p[x] = &ghs[x]; - - sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL); - - for (x = 0; x < num_gh; x++) { - p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); - - if (p[x]->gh_flags & GL_ATIME) - error = gfs2_glock_nq_atime(p[x]); - else - error = gfs2_glock_nq(p[x]); - - if (error) { - while (x--) - gfs2_glock_dq(p[x]); - break; - } - } - - kfree(p); - return error; -} - - static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) { @@ -1342,10 +1221,8 @@ __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) if (!error) { error = inode_setattr(&ip->i_inode, attr); gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); - gfs2_inode_attr_out(ip); - gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } return error; diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index f5d861760579..b57f448b15bc 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -22,13 +22,19 @@ static inline int gfs2_is_jdata(struct gfs2_inode *ip) static inline int gfs2_is_dir(struct gfs2_inode *ip) { - return S_ISDIR(ip->i_di.di_mode); + return S_ISDIR(ip->i_inode.i_mode); +} + +static inline void gfs2_set_inode_blocks(struct inode *inode) +{ + struct gfs2_inode *ip = GFS2_I(inode); + inode->i_blocks = ip->i_di.di_blocks << + (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT); } void gfs2_inode_attr_in(struct gfs2_inode *ip); -void gfs2_inode_attr_out(struct gfs2_inode *ip); -struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type); -struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum); +struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type); +struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum); int gfs2_inode_refresh(struct gfs2_inode *ip); @@ -37,19 +43,15 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff); struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, int is_root, struct nameidata *nd); struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, - unsigned int mode); + unsigned int mode, dev_t dev); int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip); int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip); int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to); int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len); - int gfs2_glock_nq_atime(struct gfs2_holder *gh); -int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs); - int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); - struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); #endif /* __INODE_DOT_H__ */ diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c index 7365aec9511b..3799f19b282f 100644 --- a/fs/gfs2/locking/dlm/plock.c +++ b/fs/gfs2/locking/dlm/plock.c @@ -8,6 +8,7 @@ #include <linux/miscdevice.h> #include <linux/lock_dlm_plock.h> +#include <linux/poll.h> #include 
"lock_dlm.h" diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 0cace3da9dbb..291415ddfe51 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -15,6 +15,7 @@ #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include <linux/lm_interface.h> +#include <linux/delay.h> #include "gfs2.h" #include "incore.h" @@ -142,7 +143,7 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl return list_empty(&ai->ai_ail1_list); } -void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags) +static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags) { struct list_head *head = &sdp->sd_ail1_list; u64 sync_gen; @@ -261,6 +262,12 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail) * @sdp: The GFS2 superblock * @blks: The number of blocks to reserve * + * Note that we never give out the last 6 blocks of the journal. Thats + * due to the fact that there is are a small number of header blocks + * associated with each log flush. The exact number can't be known until + * flush time, so we ensure that we have just enough free blocks at all + * times to avoid running out during a log flush. + * * Returns: errno */ @@ -274,7 +281,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) mutex_lock(&sdp->sd_log_reserve_mutex); gfs2_log_lock(sdp); - while(sdp->sd_log_blks_free <= blks) { + while(sdp->sd_log_blks_free <= (blks + 6)) { gfs2_log_unlock(sdp); gfs2_ail1_empty(sdp, 0); gfs2_log_flush(sdp, NULL); @@ -319,7 +326,8 @@ static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn) bh_map.b_size = 1 << inode->i_blkbits; error = gfs2_block_map(inode, lbn, 0, &bh_map); if (error || !bh_map.b_blocknr) - printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn); + printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, + (unsigned long long)bh_map.b_blocknr, lbn); gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr); return bh_map.b_blocknr; @@ -643,12 +651,9 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) up_read(&sdp->sd_log_flush_lock); gfs2_log_lock(sdp); - if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) { - gfs2_log_unlock(sdp); - gfs2_log_flush(sdp, NULL); - } else { - gfs2_log_unlock(sdp); - } + if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) + wake_up_process(sdp->sd_logd_process); + gfs2_log_unlock(sdp); } /** @@ -686,3 +691,21 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp) up_write(&sdp->sd_log_flush_lock); } + +/** + * gfs2_meta_syncfs - sync all the buffers in a filesystem + * @sdp: the filesystem + * + */ + +void gfs2_meta_syncfs(struct gfs2_sbd *sdp) +{ + gfs2_log_flush(sdp, NULL); + for (;;) { + gfs2_ail1_start(sdp, DIO_ALL); + if (gfs2_ail1_empty(sdp, DIO_ALL)) + break; + msleep(10); + } +} + diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index 7f5737d55612..8e7aa0f29109 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -48,7 +48,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp, unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, unsigned int ssize); -void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags); int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags); int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); @@ -61,5 +60,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl); void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); void gfs2_log_shutdown(struct gfs2_sbd *sdp); +void gfs2_meta_syncfs(struct gfs2_sbd *sdp); #endif /* __LOG_DOT_H__ */ diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 
ab6d1115f95d..4d7f94d8c7bd 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -182,7 +182,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) } static void buf_lo_before_scan(struct gfs2_jdesc *jd, - struct gfs2_log_header *head, int pass) + struct gfs2_log_header_host *head, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); @@ -328,7 +328,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) } static void revoke_lo_before_scan(struct gfs2_jdesc *jd, - struct gfs2_log_header *head, int pass) + struct gfs2_log_header_host *head, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); @@ -509,7 +509,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp) { LIST_HEAD(started); struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt; - struct buffer_head *bh = NULL; + struct buffer_head *bh = NULL,*bh1 = NULL; unsigned int offset = sizeof(struct gfs2_log_descriptor); struct gfs2_log_descriptor *ld; unsigned int limit; @@ -537,8 +537,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp) list_for_each_entry_safe_continue(bd1, bdt, &sdp->sd_log_le_databuf, bd_le.le_list) { + /* store off the buffer head in a local ptr since + * gfs2_bufdata might change when we drop the log lock + */ + bh1 = bd1->bd_bh; + /* An ordered write buffer */ - if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) { + if (bh1 && !buffer_pinned(bh1)) { list_move(&bd1->bd_le.le_list, &started); if (bd1 == bd2) { bd2 = NULL; @@ -547,20 +552,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp) bd_le.le_list); } total_dbuf--; - if (bd1->bd_bh) { - get_bh(bd1->bd_bh); - if (buffer_dirty(bd1->bd_bh)) { + if (bh1) { + if (buffer_dirty(bh1)) { + get_bh(bh1); + gfs2_log_unlock(sdp); - wait_on_buffer(bd1->bd_bh); - ll_rw_block(WRITE, 1, - &bd1->bd_bh); + + ll_rw_block(SWRITE, 1, &bh1); + brelse(bh1); + gfs2_log_lock(sdp); } - brelse(bd1->bd_bh); continue; } continue; - } else if (bd1->bd_bh) { /* A journaled buffer */ + } else if (bh1) { /* A journaled buffer */ int magic; gfs2_log_unlock(sdp); if (!bh) { @@ -582,16 +588,16 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp) ld->ld_data2 = cpu_to_be32(0); memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved)); } - magic = gfs2_check_magic(bd1->bd_bh); - *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); + magic = gfs2_check_magic(bh1); + *ptr++ = cpu_to_be64(bh1->b_blocknr); *ptr++ = cpu_to_be64((__u64)magic); - clear_buffer_escaped(bd1->bd_bh); + clear_buffer_escaped(bh1); if (unlikely(magic != 0)) - set_buffer_escaped(bd1->bd_bh); + set_buffer_escaped(bh1); gfs2_log_lock(sdp); if (n++ > num) break; - } else if (!bd1->bd_bh) { + } else if (!bh1) { total_dbuf--; sdp->sd_log_num_databuf--; list_del_init(&bd1->bd_le.le_list); diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 5839c05ae6be..965bc65c7c64 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -60,7 +60,7 @@ static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) } static inline void lops_before_scan(struct gfs2_jdesc *jd, - struct gfs2_log_header *head, + struct gfs2_log_header_host *head, unsigned int pass) { int x; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 21508a13bb78..7c1a9e22a526 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -25,7 +25,7 @@ #include "util.h" #include "glock.h" -static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct gfs2_inode *ip = foo; if ((flags & 
(SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == @@ -37,7 +37,7 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long } } -static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct gfs2_glock *gl = foo; if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == @@ -84,8 +84,8 @@ static int __init init_gfs2_fs(void) gfs2_inode_cachep = kmem_cache_create("gfs2_inode", sizeof(struct gfs2_inode), - 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_PANIC|SLAB_MEM_SPREAD), + 0, SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD, gfs2_init_inode_once, NULL); if (!gfs2_inode_cachep) goto fail; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 3912d6a4b1e6..0e34d9918973 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -127,17 +127,17 @@ void gfs2_meta_sync(struct gfs2_glock *gl) /** * getbuf - Get a buffer with a given address space - * @sdp: the filesystem - * @aspace: the address space + * @gl: the glock * @blkno: the block number (filesystem scope) * @create: 1 if the buffer should be created * * Returns: the buffer */ -static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace, - u64 blkno, int create) +static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create) { + struct address_space *mapping = gl->gl_aspace->i_mapping; + struct gfs2_sbd *sdp = gl->gl_sbd; struct page *page; struct buffer_head *bh; unsigned int shift; @@ -150,13 +150,13 @@ static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace, if (create) { for (;;) { - page = grab_cache_page(aspace->i_mapping, index); + page = grab_cache_page(mapping, index); if (page) break; yield(); } } else { - page = find_lock_page(aspace->i_mapping, index); + page = find_lock_page(mapping, index); if (!page) return NULL; } @@ -202,7 +202,7 @@ static void meta_prep_new(struct buffer_head *bh) struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) { struct buffer_head *bh; - bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); + bh = getbuf(gl, blkno, CREATE); meta_prep_new(bh); return bh; } @@ -220,7 +220,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, struct buffer_head **bhp) { - *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); + *bhp = getbuf(gl, blkno, CREATE); if (!buffer_uptodate(*bhp)) ll_rw_block(READ_META, 1, bhp); if (flags & DIO_WAIT) { @@ -379,11 +379,10 @@ void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct inode *aspace = ip->i_gl->gl_aspace; struct buffer_head *bh; while (blen) { - bh = getbuf(sdp, aspace, bstart, NO_CREATE); + bh = getbuf(ip->i_gl, bstart, NO_CREATE); if (bh) { struct gfs2_bufdata *bd = bh->b_private; @@ -472,6 +471,9 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height; int in_cache = 0; + BUG_ON(!gl); + BUG_ON(!sdp); + spin_lock(&ip->i_spin); if (*bh_slot && (*bh_slot)->b_blocknr == num) { bh = *bh_slot; @@ -481,7 +483,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, spin_unlock(&ip->i_spin); if (!bh) - bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE); + bh = getbuf(gl, num, CREATE); if (!bh) return -ENOBUFS; @@ -532,7 +534,6 @@ err: struct buffer_head 
*gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) { struct gfs2_sbd *sdp = gl->gl_sbd; - struct inode *aspace = gl->gl_aspace; struct buffer_head *first_bh, *bh; u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> sdp->sd_sb.sb_bsize_shift; @@ -544,7 +545,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) if (extlen > max_ra) extlen = max_ra; - first_bh = getbuf(sdp, aspace, dblock, CREATE); + first_bh = getbuf(gl, dblock, CREATE); if (buffer_uptodate(first_bh)) goto out; @@ -555,7 +556,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) extlen--; while (extlen) { - bh = getbuf(sdp, aspace, dblock, CREATE); + bh = getbuf(gl, dblock, CREATE); if (!buffer_uptodate(bh) && !buffer_locked(bh)) ll_rw_block(READA, 1, &bh); @@ -571,20 +572,3 @@ out: return first_bh; } -/** - * gfs2_meta_syncfs - sync all the buffers in a filesystem - * @sdp: the filesystem - * - */ - -void gfs2_meta_syncfs(struct gfs2_sbd *sdp) -{ - gfs2_log_flush(sdp, NULL); - for (;;) { - gfs2_ail1_start(sdp, DIO_ALL); - if (gfs2_ail1_empty(sdp, DIO_ALL)) - break; - msleep(10); - } -} - diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index 3ec939e20dff..e037425bc042 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -67,7 +67,6 @@ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip, } struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen); -void gfs2_meta_syncfs(struct gfs2_sbd *sdp); #define buffer_busy(bh) \ ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned))) diff --git a/fs/gfs2/ondisk.c b/fs/gfs2/ondisk.c index 1025960b0e6e..f2495f1e21ad 100644 --- a/fs/gfs2/ondisk.c +++ b/fs/gfs2/ondisk.c @@ -15,6 +15,8 @@ #include "gfs2.h" #include <linux/gfs2_ondisk.h> +#include <linux/lm_interface.h> +#include "incore.h" #define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \ struct->member); @@ -32,7 +34,7 @@ * first arg: the cpu-order structure */ -void gfs2_inum_in(struct gfs2_inum *no, const void *buf) +void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf) { const struct gfs2_inum *str = buf; @@ -40,7 +42,7 @@ void gfs2_inum_in(struct gfs2_inum *no, const void *buf) no->no_addr = be64_to_cpu(str->no_addr); } -void gfs2_inum_out(const struct gfs2_inum *no, void *buf) +void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf) { struct gfs2_inum *str = buf; @@ -48,13 +50,13 @@ void gfs2_inum_out(const struct gfs2_inum *no, void *buf) str->no_addr = cpu_to_be64(no->no_addr); } -static void gfs2_inum_print(const struct gfs2_inum *no) +static void gfs2_inum_print(const struct gfs2_inum_host *no) { printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino); printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr); } -static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf) +static void gfs2_meta_header_in(struct gfs2_meta_header_host *mh, const void *buf) { const struct gfs2_meta_header *str = buf; @@ -63,23 +65,7 @@ static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf) mh->mh_format = be32_to_cpu(str->mh_format); } -static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf) -{ - struct gfs2_meta_header *str = buf; - - str->mh_magic = cpu_to_be32(mh->mh_magic); - str->mh_type = cpu_to_be32(mh->mh_type); - str->mh_format = cpu_to_be32(mh->mh_format); -} - -static void gfs2_meta_header_print(const struct gfs2_meta_header *mh) -{ - pv(mh, mh_magic, "0x%.8X"); - 
pv(mh, mh_type, "%u"); - pv(mh, mh_format, "%u"); -} - -void gfs2_sb_in(struct gfs2_sb *sb, const void *buf) +void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf) { const struct gfs2_sb *str = buf; @@ -97,7 +83,7 @@ void gfs2_sb_in(struct gfs2_sb *sb, const void *buf) memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); } -void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf) +void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf) { const struct gfs2_rindex *str = buf; @@ -109,7 +95,7 @@ void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf) } -void gfs2_rindex_print(const struct gfs2_rindex *ri) +void gfs2_rindex_print(const struct gfs2_rindex_host *ri) { printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr); pv(ri, ri_length, "%u"); @@ -120,22 +106,20 @@ void gfs2_rindex_print(const struct gfs2_rindex *ri) pv(ri, ri_bitbytes, "%u"); } -void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf) +void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf) { const struct gfs2_rgrp *str = buf; - gfs2_meta_header_in(&rg->rg_header, buf); rg->rg_flags = be32_to_cpu(str->rg_flags); rg->rg_free = be32_to_cpu(str->rg_free); rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); } -void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf) +void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf) { struct gfs2_rgrp *str = buf; - gfs2_meta_header_out(&rg->rg_header, buf); str->rg_flags = cpu_to_be32(rg->rg_flags); str->rg_free = cpu_to_be32(rg->rg_free); str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); @@ -144,7 +128,7 @@ void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf) memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); } -void gfs2_quota_in(struct gfs2_quota *qu, const void *buf) +void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf) { const struct gfs2_quota *str = buf; @@ -153,96 +137,56 @@ void gfs2_quota_in(struct gfs2_quota *qu, const void *buf) qu->qu_value = be64_to_cpu(str->qu_value); } -void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf) -{ - const struct gfs2_dinode *str = buf; - - gfs2_meta_header_in(&di->di_header, buf); - gfs2_inum_in(&di->di_num, &str->di_num); - - di->di_mode = be32_to_cpu(str->di_mode); - di->di_uid = be32_to_cpu(str->di_uid); - di->di_gid = be32_to_cpu(str->di_gid); - di->di_nlink = be32_to_cpu(str->di_nlink); - di->di_size = be64_to_cpu(str->di_size); - di->di_blocks = be64_to_cpu(str->di_blocks); - di->di_atime = be64_to_cpu(str->di_atime); - di->di_mtime = be64_to_cpu(str->di_mtime); - di->di_ctime = be64_to_cpu(str->di_ctime); - di->di_major = be32_to_cpu(str->di_major); - di->di_minor = be32_to_cpu(str->di_minor); - - di->di_goal_meta = be64_to_cpu(str->di_goal_meta); - di->di_goal_data = be64_to_cpu(str->di_goal_data); - di->di_generation = be64_to_cpu(str->di_generation); - - di->di_flags = be32_to_cpu(str->di_flags); - di->di_payload_format = be32_to_cpu(str->di_payload_format); - di->di_height = be16_to_cpu(str->di_height); - - di->di_depth = be16_to_cpu(str->di_depth); - di->di_entries = be32_to_cpu(str->di_entries); - - di->di_eattr = be64_to_cpu(str->di_eattr); - -} - -void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf) +void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) { + const struct gfs2_dinode_host *di = &ip->i_di; struct gfs2_dinode *str = buf; - gfs2_meta_header_out(&di->di_header, buf); - gfs2_inum_out(&di->di_num, (char *)&str->di_num); + str->di_header.mh_magic = 
cpu_to_be32(GFS2_MAGIC); + str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); + str->di_header.__pad0 = 0; + str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); + str->di_header.__pad1 = 0; - str->di_mode = cpu_to_be32(di->di_mode); - str->di_uid = cpu_to_be32(di->di_uid); - str->di_gid = cpu_to_be32(di->di_gid); - str->di_nlink = cpu_to_be32(di->di_nlink); + gfs2_inum_out(&ip->i_num, &str->di_num); + + str->di_mode = cpu_to_be32(ip->i_inode.i_mode); + str->di_uid = cpu_to_be32(ip->i_inode.i_uid); + str->di_gid = cpu_to_be32(ip->i_inode.i_gid); + str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); str->di_size = cpu_to_be64(di->di_size); str->di_blocks = cpu_to_be64(di->di_blocks); - str->di_atime = cpu_to_be64(di->di_atime); - str->di_mtime = cpu_to_be64(di->di_mtime); - str->di_ctime = cpu_to_be64(di->di_ctime); - str->di_major = cpu_to_be32(di->di_major); - str->di_minor = cpu_to_be32(di->di_minor); + str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); + str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); + str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec); str->di_goal_meta = cpu_to_be64(di->di_goal_meta); str->di_goal_data = cpu_to_be64(di->di_goal_data); str->di_generation = cpu_to_be64(di->di_generation); str->di_flags = cpu_to_be32(di->di_flags); - str->di_payload_format = cpu_to_be32(di->di_payload_format); str->di_height = cpu_to_be16(di->di_height); - + str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && + !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? + GFS2_FORMAT_DE : 0); str->di_depth = cpu_to_be16(di->di_depth); str->di_entries = cpu_to_be32(di->di_entries); str->di_eattr = cpu_to_be64(di->di_eattr); - } -void gfs2_dinode_print(const struct gfs2_dinode *di) +void gfs2_dinode_print(const struct gfs2_inode *ip) { - gfs2_meta_header_print(&di->di_header); - gfs2_inum_print(&di->di_num); + const struct gfs2_dinode_host *di = &ip->i_di; + + gfs2_inum_print(&ip->i_num); - pv(di, di_mode, "0%o"); - pv(di, di_uid, "%u"); - pv(di, di_gid, "%u"); - pv(di, di_nlink, "%u"); printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks); - printk(KERN_INFO " di_atime = %lld\n", (long long)di->di_atime); - printk(KERN_INFO " di_mtime = %lld\n", (long long)di->di_mtime); - printk(KERN_INFO " di_ctime = %lld\n", (long long)di->di_ctime); - pv(di, di_major, "%u"); - pv(di, di_minor, "%u"); - printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta); printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data); pv(di, di_flags, "0x%.8X"); - pv(di, di_payload_format, "%u"); pv(di, di_height, "%u"); pv(di, di_depth, "%u"); @@ -251,7 +195,7 @@ void gfs2_dinode_print(const struct gfs2_dinode *di) printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr); } -void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf) +void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf) { const struct gfs2_log_header *str = buf; @@ -263,7 +207,7 @@ void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf) lh->lh_hash = be32_to_cpu(str->lh_hash); } -void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf) +void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf) { const struct gfs2_inum_range *str = buf; @@ -271,7 +215,7 @@ void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf) ir->ir_length = be64_to_cpu(str->ir_length); } -void gfs2_inum_range_out(const 
struct gfs2_inum_range *ir, void *buf) +void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf) { struct gfs2_inum_range *str = buf; @@ -279,7 +223,7 @@ void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf) str->ir_length = cpu_to_be64(ir->ir_length); } -void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf) +void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf) { const struct gfs2_statfs_change *str = buf; @@ -288,7 +232,7 @@ void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf) sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); } -void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf) +void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf) { struct gfs2_statfs_change *str = buf; @@ -297,7 +241,7 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf) str->sc_dinodes = cpu_to_be64(sc->sc_dinodes); } -void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf) +void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf) { const struct gfs2_quota_change *str = buf; diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 015640b3f123..d8d69a72a10d 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -156,19 +156,6 @@ out_ignore: return 0; } -static int zero_readpage(struct page *page) -{ - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr, 0, PAGE_CACHE_SIZE); - kunmap_atomic(kaddr, KM_USER0); - - SetPageUptodate(page); - - return 0; -} - /** * stuffed_readpage - Fill in a Linux page with stuffed file data * @ip: the inode @@ -183,9 +170,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) void *kaddr; int error; - /* Only the first page of a stuffed file might contain data */ - if (unlikely(page->index)) - return zero_readpage(page); + BUG_ON(page->index); error = gfs2_meta_inode_buffer(ip, &dibh); if (error) @@ -230,9 +215,9 @@ static int gfs2_readpage(struct file *file, struct page *page) /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */ goto skip_lock; } - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh); + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh); do_unlock = 1; - error = gfs2_glock_nq_m_atime(1, &gh); + error = gfs2_glock_nq_atime(&gh); if (unlikely(error)) goto out_unlock; } @@ -254,6 +239,8 @@ skip_lock: out: return error; out_unlock: + if (error == GLR_TRYFAILED) + error = AOP_TRUNCATED_PAGE; unlock_page(page); if (do_unlock) gfs2_holder_uninit(&gh); @@ -293,9 +280,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping, goto skip_lock; } gfs2_holder_init(ip->i_gl, LM_ST_SHARED, - LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh); + LM_FLAG_TRY_1CB|GL_ATIME, &gh); do_unlock = 1; - ret = gfs2_glock_nq_m_atime(1, &gh); + ret = gfs2_glock_nq_atime(&gh); if (ret == GLR_TRYFAILED) goto out_noerror; if (unlikely(ret)) @@ -366,10 +353,13 @@ static int gfs2_prepare_write(struct file *file, struct page *page, unsigned int write_len = to - from; - gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh); - error = gfs2_glock_nq_m_atime(1, &ip->i_gh); - if (error) + gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); + error = gfs2_glock_nq_atime(&ip->i_gh); + if (unlikely(error)) { + if (error == GLR_TRYFAILED) + error = AOP_TRUNCATED_PAGE; goto out_uninit; + } gfs2_write_calc_reserv(ip, write_len, &data_blocks, 
&ind_blocks); @@ -386,7 +376,7 @@ static int gfs2_prepare_write(struct file *file, struct page *page, if (error) goto out_alloc_put; - error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); + error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); if (error) goto out_qunlock; @@ -482,8 +472,10 @@ static int gfs2_commit_write(struct file *file, struct page *page, SetPageUptodate(page); - if (inode->i_size < file_size) + if (inode->i_size < file_size) { i_size_write(inode, file_size); + mark_inode_dirty(inode); + } } else { if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) @@ -498,11 +490,6 @@ static int gfs2_commit_write(struct file *file, struct page *page, di->di_size = cpu_to_be64(inode->i_size); } - di->di_mode = cpu_to_be32(inode->i_mode); - di->di_atime = cpu_to_be64(inode->i_atime.tv_sec); - di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec); - di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec); - brelse(dibh); gfs2_trans_end(sdp); if (al->al_requested) { @@ -624,7 +611,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, * on this path. All we need change is atime. */ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); - rv = gfs2_glock_nq_m_atime(1, &gh); + rv = gfs2_glock_nq_atime(&gh); if (rv) goto out; @@ -737,6 +724,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) if (!atomic_read(&aspace->i_writecount)) return 0; + if (!(gfp_mask & __GFP_WAIT)) + return 0; + if (time_after_eq(jiffies, t)) { stuck_releasepage(bh); /* should we withdraw here? */ diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c index 00041b1b8025..d355899585d8 100644 --- a/fs/gfs2/ops_dentry.c +++ b/fs/gfs2/ops_dentry.c @@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) struct inode *inode = dentry->d_inode; struct gfs2_holder d_gh; struct gfs2_inode *ip; - struct gfs2_inum inum; + struct gfs2_inum_host inum; unsigned int type; int error; @@ -76,7 +76,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) if (!gfs2_inum_equal(&ip->i_num, &inum)) goto invalid_gunlock; - if (IF2DT(ip->i_di.di_mode) != type) { + if (IF2DT(ip->i_inode.i_mode) != type) { gfs2_consist_inode(dip); goto fail_gunlock; } diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c index 86127d93bd35..b4e7b8775315 100644 --- a/fs/gfs2/ops_export.c +++ b/fs/gfs2/ops_export.c @@ -27,15 +27,16 @@ #include "util.h" static struct dentry *gfs2_decode_fh(struct super_block *sb, - __u32 *fh, + __u32 *p, int fh_len, int fh_type, int (*acceptable)(void *context, struct dentry *dentry), void *context) { + __be32 *fh = (__force __be32 *)p; struct gfs2_fh_obj fh_obj; - struct gfs2_inum *this, parent; + struct gfs2_inum_host *this, parent; if (fh_type != fh_len) return NULL; @@ -65,9 +66,10 @@ static struct dentry *gfs2_decode_fh(struct super_block *sb, acceptable, context); } -static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, +static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, int connectable) { + __be32 *fh = (__force __be32 *)p; struct inode *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; struct gfs2_inode *ip = GFS2_I(inode); @@ -76,14 +78,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, (connectable && *len < GFS2_LARGE_FH_SIZE)) return 255; - fh[0] = ip->i_num.no_formal_ino >> 32; - fh[0] = cpu_to_be32(fh[0]); - fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF; - fh[1] = cpu_to_be32(fh[1]); - fh[2] = ip->i_num.no_addr >> 32; - fh[2] = 
cpu_to_be32(fh[2]); - fh[3] = ip->i_num.no_addr & 0xFFFFFFFF; - fh[3] = cpu_to_be32(fh[3]); + fh[0] = cpu_to_be32(ip->i_num.no_formal_ino >> 32); + fh[1] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF); + fh[2] = cpu_to_be32(ip->i_num.no_addr >> 32); + fh[3] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF); *len = GFS2_SMALL_FH_SIZE; if (!connectable || inode == sb->s_root->d_inode) @@ -95,14 +93,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, igrab(inode); spin_unlock(&dentry->d_lock); - fh[4] = ip->i_num.no_formal_ino >> 32; - fh[4] = cpu_to_be32(fh[4]); - fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF; - fh[5] = cpu_to_be32(fh[5]); - fh[6] = ip->i_num.no_addr >> 32; - fh[6] = cpu_to_be32(fh[6]); - fh[7] = ip->i_num.no_addr & 0xFFFFFFFF; - fh[7] = cpu_to_be32(fh[7]); + fh[4] = cpu_to_be32(ip->i_num.no_formal_ino >> 32); + fh[5] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF); + fh[6] = cpu_to_be32(ip->i_num.no_addr >> 32); + fh[7] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF); fh[8] = cpu_to_be32(inode->i_mode); fh[9] = 0; /* pad to double word */ @@ -114,12 +108,12 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, } struct get_name_filldir { - struct gfs2_inum inum; + struct gfs2_inum_host inum; char *name; }; static int get_name_filldir(void *opaque, const char *name, unsigned int length, - u64 offset, struct gfs2_inum *inum, + u64 offset, struct gfs2_inum_host *inum, unsigned int type) { struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; @@ -202,7 +196,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj; - struct gfs2_inum *inum = &fh_obj->this; + struct gfs2_inum_host *inum = &fh_obj->this; struct gfs2_holder i_gh, ri_gh, rgd_gh; struct gfs2_rgrpd *rgd; struct inode *inode; diff --git a/fs/gfs2/ops_export.h b/fs/gfs2/ops_export.h index 09aca5046fb1..f925a955b3b8 100644 --- a/fs/gfs2/ops_export.h +++ b/fs/gfs2/ops_export.h @@ -15,7 +15,7 @@ extern struct export_operations gfs2_export_ops; struct gfs2_fh_obj { - struct gfs2_inum this; + struct gfs2_inum_host this; __u32 imode; }; diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c index 3064f133bf3c..b3f1e0349ae0 100644 --- a/fs/gfs2/ops_file.c +++ b/fs/gfs2/ops_file.c @@ -22,6 +22,7 @@ #include <linux/ext2_fs.h> #include <linux/crc32.h> #include <linux/lm_interface.h> +#include <linux/writeback.h> #include <asm/uaccess.h> #include "gfs2.h" @@ -71,7 +72,7 @@ static int gfs2_read_actor(read_descriptor_t *desc, struct page *page, size = count; kaddr = kmap(page); - memcpy(desc->arg.buf, kaddr + offset, size); + memcpy(desc->arg.data, kaddr + offset, size); kunmap(page); desc->count = count - size; @@ -86,7 +87,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, struct inode *inode = &ip->i_inode; read_descriptor_t desc; desc.written = 0; - desc.arg.buf = buf; + desc.arg.data = buf; desc.count = size; desc.error = 0; do_generic_mapping_read(inode->i_mapping, ra_state, @@ -139,7 +140,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin) */ static int filldir_func(void *opaque, const char *name, unsigned int length, - u64 offset, struct gfs2_inum *inum, + u64 offset, struct gfs2_inum_host *inum, unsigned int type) { struct filldir_reg *fdr = (struct filldir_reg *)opaque; @@ -253,7 +254,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) u32 fsflags; 
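The rewritten gfs2_encode_fh() above folds the old two-step "shift, then byte-swap in place" sequence into single cpu_to_be32() calls; the handle layout is unchanged, with each 64-bit inode number stored as two big-endian 32-bit words, high word first. A minimal userspace sketch of that packing, using htonl() in place of the kernel's cpu_to_be32() (pack_be64 is an illustrative name, not a kernel helper):

#include <stdint.h>
#include <arpa/inet.h>   /* htonl(): host order to big-endian 32-bit */

/* Pack a 64-bit value into two big-endian 32-bit words, high word first,
 * mirroring the fh[] layout used by the file handle code above. */
static void pack_be64(uint32_t out[2], uint64_t value)
{
	out[0] = htonl((uint32_t)(value >> 32));
	out[1] = htonl((uint32_t)(value & 0xFFFFFFFF));
}

Unpacking simply reverses it: ((uint64_t)ntohl(out[0]) << 32) | ntohl(out[1]).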
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); - error = gfs2_glock_nq_m_atime(1, &gh); + error = gfs2_glock_nq_atime(&gh); if (error) return error; @@ -266,6 +267,24 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) return error; } +void gfs2_set_inode_flags(struct inode *inode) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_dinode_host *di = &ip->i_di; + unsigned int flags = inode->i_flags; + + flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + if (di->di_flags & GFS2_DIF_IMMUTABLE) + flags |= S_IMMUTABLE; + if (di->di_flags & GFS2_DIF_APPENDONLY) + flags |= S_APPEND; + if (di->di_flags & GFS2_DIF_NOATIME) + flags |= S_NOATIME; + if (di->di_flags & GFS2_DIF_SYNC) + flags |= S_SYNC; + inode->i_flags = flags; +} + /* Flags that can be set by user space */ #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \ GFS2_DIF_DIRECTIO| \ @@ -336,8 +355,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) goto out_trans_end; gfs2_trans_add_bh(ip->i_gl, bh, 1); ip->i_di.di_flags = new_flags; - gfs2_dinode_out(&ip->i_di, bh->b_data); + gfs2_dinode_out(ip, bh->b_data); brelse(bh); + gfs2_set_inode_flags(inode); out_trans_end: gfs2_trans_end(sdp); out: @@ -425,7 +445,7 @@ static int gfs2_open(struct inode *inode, struct file *file) gfs2_assert_warn(GFS2_SB(inode), !file->private_data); file->private_data = fp; - if (S_ISREG(ip->i_di.di_mode)) { + if (S_ISREG(ip->i_inode.i_mode)) { error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) @@ -484,16 +504,40 @@ static int gfs2_close(struct inode *inode, struct file *file) * @file: the file that points to the dentry (we ignore this) * @dentry: the dentry that points to the inode to sync * + * The VFS will flush "normal" data for us. We only need to worry + * about metadata here. For journaled data, we just do a log flush + * as we can't avoid it. Otherwise we can just bale out if datasync + * is set. For stuffed inodes we must flush the log in order to + * ensure that all data is on disk. + * + * The call to write_inode_now() is there to write back metadata and + * the inode itself. It does also try and write the data, but thats + * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite() + * for us. 
+ * * Returns: errno */ static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync) { - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); + struct inode *inode = dentry->d_inode; + int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); + int ret = 0; - gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); + if (gfs2_is_jdata(GFS2_I(inode))) { + gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl); + return 0; + } - return 0; + if (sync_state != 0) { + if (!datasync) + ret = write_inode_now(inode, 0); + + if (gfs2_is_stuffed(GFS2_I(inode))) + gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl); + } + + return ret; } /** @@ -515,7 +559,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; - if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) + if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) return -ENOLCK; if (sdp->sd_args.ar_localflocks) { @@ -617,7 +661,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl) if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; - if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) + if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) return -ENOLCK; if (sdp->sd_args.ar_localflocks) diff --git a/fs/gfs2/ops_file.h b/fs/gfs2/ops_file.h index ce319f89ec8e..7e5d8ec9c846 100644 --- a/fs/gfs2/ops_file.h +++ b/fs/gfs2/ops_file.h @@ -17,7 +17,7 @@ extern struct file gfs2_internal_file_sentinel; extern int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state, char *buf, loff_t *pos, unsigned size); - +extern void gfs2_set_inode_flags(struct inode *inode); extern const struct file_operations gfs2_file_fops; extern const struct file_operations gfs2_dir_fops; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 882873a6bd69..d14e139d2674 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -237,7 +237,7 @@ fail: } static struct inode *gfs2_lookup_root(struct super_block *sb, - struct gfs2_inum *inum) + struct gfs2_inum_host *inum) { return gfs2_inode_lookup(sb, inum, DT_DIR); } @@ -246,7 +246,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo) { struct super_block *sb = sdp->sd_vfs; struct gfs2_holder sb_gh; - struct gfs2_inum *inum; + struct gfs2_inum_host *inum; struct inode *inode; int error = 0; diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index ef6e5ed70e94..636dda4c7d38 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -59,7 +59,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry, gfs2_holder_init(dip->i_gl, 0, 0, ghs); for (;;) { - inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode); + inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0); if (!IS_ERR(inode)) { gfs2_trans_end(sdp); if (dip->i_alloc.al_rgd) @@ -144,7 +144,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, int alloc_required; int error; - if (S_ISDIR(ip->i_di.di_mode)) + if (S_ISDIR(inode->i_mode)) return -EPERM; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); @@ -169,7 +169,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, } error = -EINVAL; - if (!dip->i_di.di_nlink) + if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; if (dip->i_di.di_entries == (u32)-1) @@ -178,10 +178,10 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_gunlock; error = -EINVAL; - if (!ip->i_di.di_nlink) + if (!ip->i_inode.i_nlink) goto out_gunlock; error = 
-EMLINK; - if (ip->i_di.di_nlink == (u32)-1) + if (ip->i_inode.i_nlink == (u32)-1) goto out_gunlock; alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); @@ -196,8 +196,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, if (error) goto out_alloc; - error = gfs2_quota_check(dip, dip->i_di.di_uid, - dip->i_di.di_gid); + error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid); if (error) goto out_gunlock_q; @@ -220,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, } error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num, - IF2DT(ip->i_di.di_mode)); + IF2DT(inode->i_mode)); if (error) goto out_end_trans; @@ -326,7 +325,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, gfs2_holder_init(dip->i_gl, 0, 0, ghs); - inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO); + inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0); if (IS_ERR(inode)) { gfs2_holder_uninit(ghs); return PTR_ERR(inode); @@ -339,7 +338,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, error = gfs2_meta_inode_buffer(ip, &dibh); if (!gfs2_assert_withdraw(sdp, !error)) { - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, size); brelse(dibh); @@ -379,7 +378,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) gfs2_holder_init(dip->i_gl, 0, 0, ghs); - inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode); + inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0); if (IS_ERR(inode)) { gfs2_holder_uninit(ghs); return PTR_ERR(inode); @@ -387,10 +386,9 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) ip = ghs[1].gh_gl->gl_object; - ip->i_di.di_nlink = 2; + ip->i_inode.i_nlink = 2; ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); ip->i_di.di_flags |= GFS2_DIF_JDATA; - ip->i_di.di_payload_format = GFS2_FORMAT_DE; ip->i_di.di_entries = 2; error = gfs2_meta_inode_buffer(ip, &dibh); @@ -414,7 +412,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) gfs2_inum_out(&dip->i_num, &dent->de_inum); dent->de_type = cpu_to_be16(DT_DIR); - gfs2_dinode_out(&ip->i_di, di); + gfs2_dinode_out(ip, di); brelse(dibh); } @@ -467,7 +465,7 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry) if (ip->i_di.di_entries < 2) { if (gfs2_consist_inode(ip)) - gfs2_dinode_print(&ip->i_di); + gfs2_dinode_print(ip); error = -EIO; goto out_gunlock; } @@ -504,47 +502,19 @@ out: static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) { - struct gfs2_inode *dip = GFS2_I(dir), *ip; + struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct gfs2_holder ghs[2]; struct inode *inode; - struct buffer_head *dibh; - u32 major = 0, minor = 0; - int error; - - switch (mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - major = MAJOR(dev); - minor = MINOR(dev); - break; - case S_IFIFO: - case S_IFSOCK: - break; - default: - return -EOPNOTSUPP; - }; gfs2_holder_init(dip->i_gl, 0, 0, ghs); - inode = gfs2_createi(ghs, &dentry->d_name, mode); + inode = gfs2_createi(ghs, &dentry->d_name, mode, dev); if (IS_ERR(inode)) { gfs2_holder_uninit(ghs); return PTR_ERR(inode); } - ip = ghs[1].gh_gl->gl_object; - - ip->i_di.di_major = major; - ip->i_di.di_minor = minor; - - error = gfs2_meta_inode_buffer(ip, &dibh); - - if (!gfs2_assert_withdraw(sdp, !error)) { - gfs2_dinode_out(&ip->i_di, 
dibh->b_data); - brelse(dibh); - } - gfs2_trans_end(sdp); if (dip->i_alloc.al_rgd) gfs2_inplace_release(dip); @@ -592,11 +562,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, /* Make sure we aren't trying to move a dirctory into it's subdir */ - if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) { + if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) { dir_rename = 1; - error = gfs2_glock_nq_init(sdp->sd_rename_gl, - LM_ST_EXCLUSIVE, 0, + error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0, &r_gh); if (error) goto out; @@ -637,10 +606,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, if (error) goto out_gunlock; - if (S_ISDIR(nip->i_di.di_mode)) { + if (S_ISDIR(nip->i_inode.i_mode)) { if (nip->i_di.di_entries < 2) { if (gfs2_consist_inode(nip)) - gfs2_dinode_print(&nip->i_di); + gfs2_dinode_print(nip); error = -EIO; goto out_gunlock; } @@ -666,7 +635,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, }; if (odip != ndip) { - if (!ndip->i_di.di_nlink) { + if (!ndip->i_inode.i_nlink) { error = -EINVAL; goto out_gunlock; } @@ -674,8 +643,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, error = -EFBIG; goto out_gunlock; } - if (S_ISDIR(ip->i_di.di_mode) && - ndip->i_di.di_nlink == (u32)-1) { + if (S_ISDIR(ip->i_inode.i_mode) && + ndip->i_inode.i_nlink == (u32)-1) { error = -EMLINK; goto out_gunlock; } @@ -702,8 +671,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, if (error) goto out_alloc; - error = gfs2_quota_check(ndip, ndip->i_di.di_uid, - ndip->i_di.di_gid); + error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid); if (error) goto out_gunlock_q; @@ -729,7 +697,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, /* Remove the target file, if it exists */ if (nip) { - if (S_ISDIR(nip->i_di.di_mode)) + if (S_ISDIR(nip->i_inode.i_mode)) error = gfs2_rmdiri(ndip, &ndentry->d_name, nip); else { error = gfs2_dir_del(ndip, &ndentry->d_name); @@ -760,9 +728,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; - ip->i_di.di_ctime = get_seconds(); + ip->i_inode.i_ctime.tv_sec = get_seconds(); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } @@ -771,7 +739,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, goto out_end_trans; error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num, - IF2DT(ip->i_di.di_mode)); + IF2DT(ip->i_inode.i_mode)); if (error) goto out_end_trans; @@ -867,6 +835,10 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) * @mask: * @nd: passed from Linux VFS, ignored by us * + * This may be called from the VFS directly, or from within GFS2 with the + * inode locked, so we look to see if the glock is already locked and only + * lock the glock if its not already been done. 
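The comment added above describes the conditional locking that gfs2_permission() and gfs2_getattr() adopt in the hunks which follow: take the shared glock only when the caller does not already hold it, and drop only what was acquired. A condensed sketch of the pattern, with a hypothetical example_locked_op() standing in for the real operations (the glock helpers named here are the ones visible in this diff):

/* Sketch only: acquire the shared glock for the duration of the
 * operation unless the caller already holds it. */
static int example_locked_op(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int unlock = 0;
	int error;

	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_ANY, &gh);
		if (error)
			return error;
		unlock = 1;	/* remember that we took it ourselves */
	}

	/* ... read-side work protected by the shared glock ... */

	if (unlock)
		gfs2_glock_dq_uninit(&gh);
	return 0;
}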
+ * * Returns: errno */ @@ -875,15 +847,18 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd) struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder i_gh; int error; + int unlock = 0; - if (ip->i_vn == ip->i_gl->gl_vn) - return generic_permission(inode, mask, gfs2_check_acl); + if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) { + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); + if (error) + return error; + unlock = 1; + } - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); - if (!error) { - error = generic_permission(inode, mask, gfs2_check_acl_locked); + error = generic_permission(inode, mask, gfs2_check_acl); + if (unlock) gfs2_glock_dq_uninit(&i_gh); - } return error; } @@ -914,8 +889,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) u32 ouid, ogid, nuid, ngid; int error; - ouid = ip->i_di.di_uid; - ogid = ip->i_di.di_gid; + ouid = inode->i_uid; + ogid = inode->i_gid; nuid = attr->ia_uid; ngid = attr->ia_gid; @@ -946,10 +921,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) error = inode_setattr(inode, attr); gfs2_assert_warn(sdp, !error); - gfs2_inode_attr_out(ip); gfs2_trans_add_bh(ip->i_gl, dibh, 1); - gfs2_dinode_out(&ip->i_di, dibh->b_data); + gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) { @@ -1018,6 +992,12 @@ out: * @dentry: The dentry to stat * @stat: The inode's stats * + * This may be called from the VFS directly, or from within GFS2 with the + * inode locked, so we look to see if the glock is already locked and only + * lock the glock if its not already been done. Note that its the NFS + * readdirplus operation which causes this to be called (from filldir) + * with the glock already held. + * * Returns: errno */ @@ -1028,14 +1008,20 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int error; + int unlock = 0; - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); - if (!error) { - generic_fillattr(inode, stat); - gfs2_glock_dq_uninit(&gh); + if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) { + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); + if (error) + return error; + unlock = 1; } - return error; + generic_fillattr(inode, stat); + if (unlock); + gfs2_glock_dq_uninit(&gh); + + return 0; } static int gfs2_setxattr(struct dentry *dentry, const char *name, diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c index 06f06f7773d0..7685b46f934b 100644 --- a/fs/gfs2/ops_super.c +++ b/fs/gfs2/ops_super.c @@ -138,16 +138,28 @@ static void gfs2_put_super(struct super_block *sb) } /** - * gfs2_write_super - disk commit all incore transactions - * @sb: the filesystem + * gfs2_write_super + * @sb: the superblock * - * This function is called every time sync(2) is called. - * After this exits, all dirty buffers are synced. */ static void gfs2_write_super(struct super_block *sb) { - gfs2_log_flush(sb->s_fs_info, NULL); + sb->s_dirt = 0; +} + +/** + * gfs2_sync_fs - sync the filesystem + * @sb: the superblock + * + * Flushes the log to disk. 
+ */ +static int gfs2_sync_fs(struct super_block *sb, int wait) +{ + sb->s_dirt = 0; + if (wait) + gfs2_log_flush(sb->s_fs_info, NULL); + return 0; } /** @@ -204,7 +216,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_inode->i_sb; struct gfs2_sbd *sdp = sb->s_fs_info; - struct gfs2_statfs_change sc; + struct gfs2_statfs_change_host sc; int error; if (gfs2_tune_get(sdp, gt_statfs_slow)) @@ -282,8 +294,6 @@ static void gfs2_clear_inode(struct inode *inode) */ if (inode->i_private) { struct gfs2_inode *ip = GFS2_I(inode); - gfs2_glock_inode_squish(inode); - gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED); ip->i_gl->gl_object = NULL; gfs2_glock_schedule_for_reclaim(ip->i_gl); gfs2_glock_put(ip->i_gl); @@ -384,7 +394,7 @@ static void gfs2_delete_inode(struct inode *inode) if (!inode->i_private) goto out; - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh); + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh); if (unlikely(error)) { gfs2_glock_dq_uninit(&ip->i_iopen_gh); goto out; @@ -396,7 +406,7 @@ static void gfs2_delete_inode(struct inode *inode) if (error) goto out_uninit; - if (S_ISDIR(ip->i_di.di_mode) && + if (S_ISDIR(inode->i_mode) && (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { error = gfs2_dir_exhash_dealloc(ip); if (error) @@ -452,17 +462,18 @@ static void gfs2_destroy_inode(struct inode *inode) } struct super_operations gfs2_super_ops = { - .alloc_inode = gfs2_alloc_inode, - .destroy_inode = gfs2_destroy_inode, - .write_inode = gfs2_write_inode, - .delete_inode = gfs2_delete_inode, - .put_super = gfs2_put_super, - .write_super = gfs2_write_super, - .write_super_lockfs = gfs2_write_super_lockfs, - .unlockfs = gfs2_unlockfs, - .statfs = gfs2_statfs, - .remount_fs = gfs2_remount_fs, - .clear_inode = gfs2_clear_inode, - .show_options = gfs2_show_options, + .alloc_inode = gfs2_alloc_inode, + .destroy_inode = gfs2_destroy_inode, + .write_inode = gfs2_write_inode, + .delete_inode = gfs2_delete_inode, + .put_super = gfs2_put_super, + .write_super = gfs2_write_super, + .sync_fs = gfs2_sync_fs, + .write_super_lockfs = gfs2_write_super_lockfs, + .unlockfs = gfs2_unlockfs, + .statfs = gfs2_statfs, + .remount_fs = gfs2_remount_fs, + .clear_inode = gfs2_clear_inode, + .show_options = gfs2_show_options, }; diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c index 5453d2947ab3..45a5f11fc39a 100644 --- a/fs/gfs2/ops_vm.c +++ b/fs/gfs2/ops_vm.c @@ -76,7 +76,7 @@ static int alloc_page_backing(struct gfs2_inode *ip, struct page *page) if (error) goto out; - error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); + error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid); if (error) goto out_gunlock_q; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a3deae7416c9..d0db881b55d2 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -452,19 +452,19 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; - error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd); + error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); if (error) goto out; al->al_qd_num++; qd++; - error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd); + error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); if (error) goto out; al->al_qd_num++; qd++; - if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) { + if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { error = 
qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); if (error) goto out; @@ -472,7 +472,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) qd++; } - if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) { + if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); if (error) goto out; @@ -539,8 +539,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) qc->qc_id = cpu_to_be32(qd->qd_id); } - x = qc->qc_change; - x = be64_to_cpu(x) + change; + x = be64_to_cpu(qc->qc_change) + change; qc->qc_change = cpu_to_be64(x); spin_lock(&sdp->sd_quota_spin); @@ -743,7 +742,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh, struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); struct gfs2_holder i_gh; - struct gfs2_quota q; + struct gfs2_quota_host q; char buf[sizeof(struct gfs2_quota)]; struct file_ra_state ra_state; int error; @@ -1103,7 +1102,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; y++, slot++) { - struct gfs2_quota_change qc; + struct gfs2_quota_change_host qc; struct gfs2_quota_data *qd; gfs2_quota_change_in(&qc, bh->b_data + diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 62cd223819b7..d0c806b85c86 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -132,10 +132,11 @@ void gfs2_revoke_clean(struct gfs2_sbd *sdp) */ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, - struct gfs2_log_header *head) + struct gfs2_log_header_host *head) { struct buffer_head *bh; - struct gfs2_log_header lh; + struct gfs2_log_header_host lh; + const u32 nothing = 0; u32 hash; int error; @@ -143,11 +144,11 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, if (error) return error; - memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header)); - lh.lh_hash = 0; - hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header)); + hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) - + sizeof(u32)); + hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing)); + hash ^= (u32)~0; gfs2_log_header_in(&lh, bh->b_data); - brelse(bh); if (lh.lh_header.mh_magic != GFS2_MAGIC || @@ -174,7 +175,7 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, */ static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, - struct gfs2_log_header *head) + struct gfs2_log_header_host *head) { unsigned int orig_blk = *blk; int error; @@ -205,10 +206,10 @@ static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, * Returns: errno */ -static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head) +static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { unsigned int blk = head->lh_blkno; - struct gfs2_log_header lh; + struct gfs2_log_header_host lh; int error; for (;;) { @@ -245,9 +246,9 @@ static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head) * Returns: errno */ -int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head) +int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { - struct gfs2_log_header lh_1, lh_m; + struct gfs2_log_header_host lh_1, lh_m; u32 blk_1, blk_2, blk_m; int error; @@ -320,7 +321,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start, length = be32_to_cpu(ld->ld_length); if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) { - struct gfs2_log_header lh;
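The new get_log_header() above checksums the on-disk header while treating its stored hash field as zero, without copying the buffer: the CRC is run over everything before the field and then over a literal zero word. A userspace sketch of the same trick using zlib's crc32(), whose built-in seeding and final inversion should correspond to the explicit ~0 seed and trailing xor in the kernel code (the struct layout below is illustrative; the real gfs2_log_header simply ends with a 32-bit lh_hash):

#include <stdint.h>
#include <zlib.h>	/* crc32(); link with -lz */

struct log_header {		/* illustrative layout only */
	uint8_t  fixed[60];	/* everything before the hash field */
	uint32_t lh_hash;	/* stored checksum, last field on disk */
};

/* Checksum the header as if lh_hash were zero, without modifying it:
 * hash the leading bytes, then a zero word in place of the field. */
static uint32_t header_hash(const struct log_header *lh)
{
	const uint32_t zero = 0;
	uLong crc = crc32(0L, Z_NULL, 0);

	crc = crc32(crc, (const Bytef *)lh, sizeof(*lh) - sizeof(uint32_t));
	crc = crc32(crc, (const Bytef *)&zero, sizeof(zero));
	return (uint32_t)crc;
}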
error = get_log_header(jd, start, &lh); if (!error) { gfs2_replay_incr_blk(sdp, &start); @@ -363,7 +364,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start, * Returns: errno */ -static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head) +static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); @@ -425,7 +426,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - struct gfs2_log_header head; + struct gfs2_log_header_host head; struct gfs2_holder j_gh, ji_gh, t_gh; unsigned long t; int ro = 0; diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 961feedf4d8b..f7235e61c723 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -26,7 +26,7 @@ int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); void gfs2_revoke_clean(struct gfs2_sbd *sdp); int gfs2_find_jhead(struct gfs2_jdesc *jd, - struct gfs2_log_header *head); + struct gfs2_log_header_host *head); int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); void gfs2_check_journals(struct gfs2_sbd *sdp); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index b261385c0065..ff0846528d54 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -253,7 +253,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) } -static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block) +static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block) { u64 first = ri->ri_data0; u64 last = first + ri->ri_data; @@ -1217,7 +1217,7 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip) al->al_alloced++; gfs2_statfs_change(sdp, 0, -1, 0); - gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); + gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); spin_lock(&sdp->sd_rindex_spin); rgd->rd_free_clone--; @@ -1261,7 +1261,7 @@ u64 gfs2_alloc_meta(struct gfs2_inode *ip) al->al_alloced++; gfs2_statfs_change(sdp, 0, -1, 0); - gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); + gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_trans_add_unrevoke(sdp, block); spin_lock(&sdp->sd_rindex_spin); @@ -1337,8 +1337,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) gfs2_trans_add_rg(rgd); gfs2_statfs_change(sdp, 0, +blen, 0); - gfs2_quota_change(ip, -(s64)blen, - ip->i_di.di_uid, ip->i_di.di_gid); + gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } /** @@ -1366,7 +1365,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) gfs2_trans_add_rg(rgd); gfs2_statfs_change(sdp, 0, +blen, 0); - gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid); + gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_meta_wipe(ip, bstart, blen); } @@ -1411,7 +1410,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { gfs2_free_uninit_di(rgd, ip->i_num.no_addr); - gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid); + gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_meta_wipe(ip, ip->i_num.no_addr, 1); } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 6a78b1b32e25..43a24f2e5905 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -97,7 +97,7 @@ void gfs2_tune_init(struct gfs2_tune *gt) * changed. 
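Most of the hunks in this stretch of the series are mechanical: on-disk structures keep their names and big-endian fields, while the in-core copies gain a _host suffix and native-endian fields, converted by _in/_out helpers at the metadata boundary. A small userspace sketch of the idea, using 32-bit fields and htonl()/ntohl() where the real GFS2 structures use __be64 and cpu_to_be64()/be64_to_cpu(); all names below are illustrative, loosely modelled on the gfs2_statfs_change pair:

#include <stdint.h>
#include <arpa/inet.h>

/* On-disk (wire) form: fixed-width big-endian fields. */
struct statfs_change_disk {
	uint32_t sc_total;
	uint32_t sc_free;
	uint32_t sc_dinodes;
};

/* In-core "host" form: native-endian, free to change layout or width. */
struct statfs_change_host {
	uint32_t sc_total;
	uint32_t sc_free;
	uint32_t sc_dinodes;
};

static void statfs_change_in(struct statfs_change_host *sc, const void *buf)
{
	const struct statfs_change_disk *str = buf;

	sc->sc_total   = ntohl(str->sc_total);
	sc->sc_free    = ntohl(str->sc_free);
	sc->sc_dinodes = ntohl(str->sc_dinodes);
}

static void statfs_change_out(const struct statfs_change_host *sc, void *buf)
{
	struct statfs_change_disk *str = buf;

	str->sc_total   = htonl(sc->sc_total);
	str->sc_free    = htonl(sc->sc_free);
	str->sc_dinodes = htonl(sc->sc_dinodes);
}

Keeping the conversion in one _in/_out pair per structure is what lets the rest of the code work on plain host-endian fields (and, as in the sb change just below, a dedicated gfs2_sb_host type) without scattering byte-swaps.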
*/ -int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent) +int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent) { unsigned int x; @@ -180,6 +180,24 @@ static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error) return 0; } +/** + * gfs2_read_super - Read the gfs2 super block from disk + * @sb: The VFS super block + * @sector: The location of the super block + * + * This uses the bio functions to read the super block from disk + * because we want to be 100% sure that we never read cached data. + * A super block is read twice only during each GFS2 mount and is + * never written to by the filesystem. The first time its read no + * locks are held, and the only details which are looked at are those + * relating to the locking protocol. Once locking is up and working, + * the sb is read again under the lock to establish the location of + * the master directory (contains pointers to journals etc) and the + * root directory. + * + * Returns: A page containing the sb or NULL + */ + struct page *gfs2_read_super(struct super_block *sb, sector_t sector) { struct page *page; @@ -199,7 +217,7 @@ struct page *gfs2_read_super(struct super_block *sb, sector_t sector) return NULL; } - bio->bi_sector = sector; + bio->bi_sector = sector * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio_add_page(bio, page, PAGE_SIZE, 0); @@ -508,7 +526,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_holder t_gh; - struct gfs2_log_header head; + struct gfs2_log_header_host head; int error; error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, @@ -517,7 +535,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) return error; gfs2_meta_cache_flush(ip); - j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (error) @@ -587,9 +605,9 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp) int gfs2_statfs_init(struct gfs2_sbd *sdp) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); - struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); - struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh, *l_bh; struct gfs2_holder gh; int error; @@ -634,7 +652,7 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, s64 dinodes) { struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); - struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *l_bh; int error; @@ -660,8 +678,8 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); - struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; - struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct gfs2_holder gh; struct buffer_head *m_bh, *l_bh; int error; @@ -727,10 +745,10 @@ out: * Returns: errno */ -int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) +int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) { - struct gfs2_statfs_change *m_sc = 
&sdp->sd_statfs_master; - struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; + struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; spin_lock(&sdp->sd_statfs_spin); @@ -760,7 +778,7 @@ int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) */ static int statfs_slow_fill(struct gfs2_rgrpd *rgd, - struct gfs2_statfs_change *sc) + struct gfs2_statfs_change_host *sc) { gfs2_rgrp_verify(rgd); sc->sc_total += rgd->rd_ri.ri_data; @@ -782,7 +800,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd, * Returns: errno */ -int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) +int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) { struct gfs2_holder ri_gh; struct gfs2_rgrpd *rgd_next; @@ -792,7 +810,7 @@ int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) int done; int error = 0, err; - memset(sc, 0, sizeof(struct gfs2_statfs_change)); + memset(sc, 0, sizeof(struct gfs2_statfs_change_host)); gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); if (!gha) return -ENOMEM; @@ -873,7 +891,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd; struct lfcc *lfcc; LIST_HEAD(list); - struct gfs2_log_header lh; + struct gfs2_log_header_host lh; int error; error = gfs2_jindex_hold(sdp, &ji_gh); diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h index 5bb443ae0f59..e590b2df11dc 100644 --- a/fs/gfs2/super.h +++ b/fs/gfs2/super.h @@ -14,7 +14,7 @@ void gfs2_tune_init(struct gfs2_tune *gt); -int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent); +int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent); int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent); struct page *gfs2_read_super(struct super_block *sb, sector_t sector); @@ -45,8 +45,8 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp); void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, s64 dinodes); int gfs2_statfs_sync(struct gfs2_sbd *sdp); -int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc); -int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc); +int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); +int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); int gfs2_freeze_fs(struct gfs2_sbd *sdp); void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 0e0ec988f731..983eaf1e06be 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -426,9 +426,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\ } \ TUNE_ATTR_2(name, name##_store) -TUNE_ATTR(ilimit, 0); -TUNE_ATTR(ilimit_tries, 0); -TUNE_ATTR(ilimit_min, 0); TUNE_ATTR(demote_secs, 0); TUNE_ATTR(incore_log_blocks, 0); TUNE_ATTR(log_flush_secs, 0); @@ -447,7 +444,6 @@ TUNE_ATTR(quota_simul_sync, 1); TUNE_ATTR(quota_cache_secs, 1); TUNE_ATTR(max_atomic_write, 1); TUNE_ATTR(stall_secs, 1); -TUNE_ATTR(entries_per_readdir, 1); TUNE_ATTR(greedy_default, 1); TUNE_ATTR(greedy_quantum, 1); TUNE_ATTR(greedy_max, 1); @@ -459,9 +455,6 @@ TUNE_ATTR_DAEMON(quotad_secs, quotad_process); TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); static struct attribute *tune_attrs[] = { - &tune_attr_ilimit.attr, - &tune_attr_ilimit_tries.attr, - &tune_attr_ilimit_min.attr, &tune_attr_demote_secs.attr, &tune_attr_incore_log_blocks.attr, &tune_attr_log_flush_secs.attr, @@ -478,7 +471,6 @@ static struct 
attribute *tune_attrs[] = { &tune_attr_quota_cache_secs.attr, &tune_attr_max_atomic_write.attr, &tune_attr_stall_secs.attr, - &tune_attr_entries_per_readdir.attr, &tune_attr_greedy_default.attr, &tune_attr_greedy_quantum.attr, &tune_attr_greedy_max.attr, diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 196c604faadc..e5707a9f78c2 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -23,9 +23,9 @@ #include "lm.h" #include "util.h" -kmem_cache_t *gfs2_glock_cachep __read_mostly; -kmem_cache_t *gfs2_inode_cachep __read_mostly; -kmem_cache_t *gfs2_bufdata_cachep __read_mostly; +struct kmem_cache *gfs2_glock_cachep __read_mostly; +struct kmem_cache *gfs2_inode_cachep __read_mostly; +struct kmem_cache *gfs2_bufdata_cachep __read_mostly; void gfs2_assert_i(struct gfs2_sbd *sdp) { diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index 76a50899fe9e..28938a46cf47 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -83,8 +83,7 @@ static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp, char *file, unsigned int line) { struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; - u32 magic = mh->mh_magic; - magic = be32_to_cpu(magic); + u32 magic = be32_to_cpu(mh->mh_magic); if (unlikely(magic != GFS2_MAGIC)) return gfs2_meta_check_ii(sdp, bh, "magic number", function, file, line); @@ -107,9 +106,8 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp, char *file, unsigned int line) { struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; - u32 magic = mh->mh_magic; + u32 magic = be32_to_cpu(mh->mh_magic); u16 t = be32_to_cpu(mh->mh_type); - magic = be32_to_cpu(magic); if (unlikely(magic != GFS2_MAGIC)) return gfs2_meta_check_ii(sdp, bh, "magic number", function, file, line); @@ -146,9 +144,9 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__); -extern kmem_cache_t *gfs2_glock_cachep; -extern kmem_cache_t *gfs2_inode_cachep; -extern kmem_cache_t *gfs2_bufdata_cachep; +extern struct kmem_cache *gfs2_glock_cachep; +extern struct kmem_cache *gfs2_inode_cachep; +extern struct kmem_cache *gfs2_bufdata_cachep; static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, unsigned int *p) diff --git a/fs/hfs/super.c b/fs/hfs/super.c index d43b4fcc8ad3..a36987966004 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -24,7 +24,7 @@ #include "hfs_fs.h" #include "btree.h" -static kmem_cache_t *hfs_inode_cachep; +static struct kmem_cache *hfs_inode_cachep; MODULE_LICENSE("GPL"); @@ -145,7 +145,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb) { struct hfs_inode_info *i; - i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL); + i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL); return i ? 
&i->vfs_inode : NULL; } @@ -390,11 +390,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) hfs_find_exit(&fd); goto bail_no_root; } + res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); hfs_find_exit(&fd); if (!root_inode) goto bail_no_root; + res = -ENOMEM; sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto bail_iput; @@ -428,7 +430,7 @@ static struct file_system_type hfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) +static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) { struct hfs_inode_info *i = p; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 194eede52fa4..0f513c6bf843 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -434,13 +434,13 @@ MODULE_AUTHOR("Brad Boyer"); MODULE_DESCRIPTION("Extended Macintosh Filesystem"); MODULE_LICENSE("GPL"); -static kmem_cache_t *hfsplus_inode_cachep; +static struct kmem_cache *hfsplus_inode_cachep; static struct inode *hfsplus_alloc_inode(struct super_block *sb) { struct hfsplus_inode_info *i; - i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL); + i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL); return i ? &i->vfs_inode : NULL; } @@ -467,7 +467,7 @@ static struct file_system_type hfsplus_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) +static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) { struct hfsplus_inode_info *i = p; diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index ecc9180645ae..594f9c428fc2 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -84,7 +84,8 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } if (!fno->dirflag) { e = 1; - hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino); + hpfs_error(inode->i_sb, "not a directory, fnode %08lx", + (unsigned long)inode->i_ino); } if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { e = 1; @@ -144,8 +145,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } if (de->first || de->last) { if (hpfs_sb(inode->i_sb)->sb_chk) { - if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos); - if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos); + if (de->first && !de->last && (de->namelen != 2 + || de ->name[0] != 1 || de->name[1] != 1)) + hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos); + if (de->last && (de->namelen != 1 || de ->name[0] != 255)) + hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos); } hpfs_brelse4(&qbh); goto again; diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index 229ff2fb1809..fe83c2b7d2d8 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c @@ -533,10 +533,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; - if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { - hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino); + if (hpfs_sb(i->i_sb)->sb_chk) + if (up != i->i_ino) { + hpfs_error(i->i_sb, + "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", + dno, up, (unsigned long)i->i_ino); return; - } + 
} if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = up; d1->root_dnode = 1; @@ -851,7 +854,9 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, /* Going to the next dirent */ if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { - hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp); + hpfs_error(inode->i_sb, + "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", + (unsigned long long)*posp); goto bail; } /* We're going down the tree */ diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c index 66339dc030e4..547a8384571f 100644 --- a/fs/hpfs/ea.c +++ b/fs/hpfs/ea.c @@ -243,8 +243,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data fnode->ea_offs = 0xc4; } if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { - hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x", - inode->i_ino, fnode->ea_offs, fnode->ea_size_s); + hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", + (unsigned long)inode->i_ino, + fnode->ea_offs, fnode->ea_size_s); return; } if ((fnode->ea_size_s || !fnode->ea_size_l) && diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 32ab51e42b96..1c07aa82d327 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -317,7 +317,8 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb) /* super.c */ -void hpfs_error(struct super_block *, char *, ...); +void hpfs_error(struct super_block *, const char *, ...) + __attribute__((format (printf, 2, 3))); int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *); unsigned hpfs_count_one_bitmap(struct super_block *, secno); diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 7faef8544f32..85d3e1d9ac00 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -251,7 +251,10 @@ void hpfs_write_inode_nolock(struct inode *i) de->file_size = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); - } else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino); + } else + hpfs_error(i->i_sb, + "directory %08lx doesn't have '.' entry", + (unsigned long)i->i_ino); } mark_buffer_dirty(bh); brelse(bh); diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index 0fecdac22e4e..c4724589b2eb 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -126,32 +126,40 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea struct extended_attribute *ea; struct extended_attribute *ea_end; if (fnode->magic != FNODE_MAGIC) { - hpfs_error(s, "bad magic on fnode %08x", ino); + hpfs_error(s, "bad magic on fnode %08lx", + (unsigned long)ino); goto bail; } if (!fnode->dirflag) { if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != (fnode->btree.internal ? 12 : 8)) { - hpfs_error(s, "bad number of nodes in fnode %08x", ino); + hpfs_error(s, + "bad number of nodes in fnode %08lx", + (unsigned long)ino); goto bail; } if (fnode->btree.first_free != 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 
8 : 12)) { - hpfs_error(s, "bad first_free pointer in fnode %08x", ino); + hpfs_error(s, + "bad first_free pointer in fnode %08lx", + (unsigned long)ino); goto bail; } } if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { - hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x", - ino, fnode->ea_offs, fnode->ea_size_s); + hpfs_error(s, + "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", + (unsigned long)ino, + fnode->ea_offs, fnode->ea_size_s); goto bail; } ea = fnode_ea(fnode); ea_end = fnode_end_ea(fnode); while (ea != ea_end) { if (ea > ea_end) { - hpfs_error(s, "bad EA in fnode %08x", ino); + hpfs_error(s, "bad EA in fnode %08lx", + (unsigned long)ino); goto bail; } ea = next_ea(ea); diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 450b5e0b4785..d4abc1a1d566 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -46,21 +46,17 @@ static void unmark_dirty(struct super_block *s) } /* Filesystem error... */ +static char err_buf[1024]; -#define ERR_BUF_SIZE 1024 - -void hpfs_error(struct super_block *s, char *m,...) +void hpfs_error(struct super_block *s, const char *fmt, ...) { - char *buf; - va_list l; - va_start(l, m); - if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL))) - printk("HPFS: No memory for error message '%s'\n",m); - else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE) - printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n"); - printk("HPFS: filesystem error: "); - if (buf) printk("%s", buf); - else printk("%s\n",m); + va_list args; + + va_start(args, fmt); + vsnprintf(err_buf, sizeof(err_buf), fmt, args); + va_end(args); + + printk("HPFS: filesystem error: %s", err_buf); if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { printk("; crashing the system because you wanted it\n"); @@ -76,7 +72,6 @@ void hpfs_error(struct super_block *s, char *m,...) } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... 
but you wanted it so!\n"); } else printk("\n"); - kfree(buf); hpfs_sb(s)->sb_was_error = 1; } @@ -160,12 +155,12 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static kmem_cache_t * hpfs_inode_cachep; +static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; - ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS); + ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -177,7 +172,7 @@ static void hpfs_destroy_inode(struct inode *inode) kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 0bea6a619e10..0706f5aac6a2 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) loff_t len, vma_len; int ret; - if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1)) - return -EINVAL; - - if (vma->vm_start & ~HPAGE_MASK) - return -EINVAL; - - if (vma->vm_end & ~HPAGE_MASK) - return -EINVAL; - - if (vma->vm_end - vma->vm_start < HPAGE_SIZE) - return -EINVAL; + /* + * vma alignment has already been checked by prepare_hugepage_range. + * If you add any error returns here, do so after setting VM_HUGETLB, + * so is_vm_hugetlb_page tests below unmap_region go the right way + * when do_mmap_pgoff unwinds (may be important on powerpc and ia64). + */ + vma->vm_flags |= VM_HUGETLB | VM_RESERVED; + vma->vm_ops = &hugetlb_vm_ops; vma_len = (loff_t)(vma->vm_end - vma->vm_start); mutex_lock(&inode->i_mutex); file_accessed(file); - vma->vm_flags |= VM_HUGETLB | VM_RESERVED; - vma->vm_ops = &hugetlb_vm_ops; ret = -ENOMEM; len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); @@ -518,7 +513,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) } -static kmem_cache_t *hugetlbfs_inode_cachep; +static struct kmem_cache *hugetlbfs_inode_cachep; static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) { @@ -527,7 +522,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; - p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL); + p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; @@ -550,7 +545,7 @@ static const struct address_space_operations hugetlbfs_aops = { }; -static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; diff --git a/fs/inode.c b/fs/inode.c index 26cdb115ce67..9ecccab7326d 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -97,7 +97,7 @@ static DEFINE_MUTEX(iprune_mutex); */ struct inodes_stat_t inodes_stat; -static kmem_cache_t * inode_cachep __read_mostly; +static struct kmem_cache * inode_cachep __read_mostly; static struct inode *alloc_inode(struct super_block *sb) { @@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct super_block *sb) if (sb->s_op->alloc_inode) inode = sb->s_op->alloc_inode(sb); else - inode = (struct inode *) 
kmem_cache_alloc(inode_cachep, SLAB_KERNEL); + inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL); if (inode) { struct address_space * const mapping = &inode->i_data; @@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode) EXPORT_SYMBOL(inode_init_once); -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct inode * inode = (struct inode *) foo; @@ -1242,9 +1242,6 @@ EXPORT_SYMBOL(inode_needs_sync); */ #ifdef CONFIG_QUOTA -/* Function back in dquot.c */ -int remove_inode_dquot_ref(struct inode *, int, struct list_head *); - void remove_dquot_ref(struct super_block *sb, int type, struct list_head *tofree_head) { diff --git a/fs/inotify.c b/fs/inotify.c index 723836a1f718..f5099d86fd91 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -27,6 +27,7 @@ #include <linux/idr.h> #include <linux/slab.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/init.h> #include <linux/list.h> #include <linux/writeback.h> diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 017cb0f134d6..e1956e6f116c 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -34,8 +34,8 @@ #include <asm/ioctls.h> -static kmem_cache_t *watch_cachep __read_mostly; -static kmem_cache_t *event_cachep __read_mostly; +static struct kmem_cache *watch_cachep __read_mostly; +static struct kmem_cache *event_cachep __read_mostly; static struct vfsmount *inotify_mnt __read_mostly; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index c34b862cdbf2..ea55b6c469ec 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -57,12 +57,12 @@ static void isofs_put_super(struct super_block *sb) static void isofs_read_inode(struct inode *); static int isofs_statfs (struct dentry *, struct kstatfs *); -static kmem_cache_t *isofs_inode_cachep; +static struct kmem_cache *isofs_inode_cachep; static struct inode *isofs_alloc_inode(struct super_block *sb) { struct iso_inode_info *ei; - ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL); + ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode) kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); } -static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct iso_inode_info *ei = foo; diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index b85c686b60db..10fff9443938 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -31,7 +31,7 @@ #include <linux/smp_lock.h> #include <linux/init.h> #include <linux/mm.h> -#include <linux/suspend.h> +#include <linux/freezer.h> #include <linux/pagemap.h> #include <linux/kthread.h> #include <linux/poison.h> @@ -1630,7 +1630,7 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) #define JBD_MAX_SLABS 5 #define JBD_SLAB_INDEX(size) (size >> 11) -static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; +static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; static const char *jbd_slab_names[JBD_MAX_SLABS] = { "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" }; @@ -1693,7 +1693,7 @@ void jbd_slab_free(void *ptr, size_t size) /* * Journal_head storage management */ -static kmem_cache_t *journal_head_cache; +static struct kmem_cache *journal_head_cache; #ifdef CONFIG_JBD_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif @@ -1996,7 +1996,7 @@ static void __exit 
remove_jbd_proc_entry(void) #endif -kmem_cache_t *jbd_handle_cache; +struct kmem_cache *jbd_handle_cache; static int __init journal_init_handle_cache(void) { diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index c532429d8d9b..d204ab394f36 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c @@ -70,8 +70,8 @@ #include <linux/init.h> #endif -static kmem_cache_t *revoke_record_cache; -static kmem_cache_t *revoke_table_cache; +static struct kmem_cache *revoke_record_cache; +static struct kmem_cache *revoke_table_cache; /* Each revoke record represents one single revoked block. During journal replay, this involves recording the transaction ID of the diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 4f82bcd63e48..d38e0d575e48 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -27,6 +27,8 @@ #include <linux/mm.h> #include <linux/highmem.h> +static void __journal_temp_unlink_buffer(struct journal_head *jh); + /* * get_transaction: obtain a new transaction_t object. * @@ -1499,7 +1501,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh) * * Called under j_list_lock. The journal may not be locked. */ -void __journal_temp_unlink_buffer(struct journal_head *jh) +static void __journal_temp_unlink_buffer(struct journal_head *jh) { struct journal_head **list = NULL; transaction_t *transaction; diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 70b2ae1ef281..6bd8005e3d34 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -248,8 +248,12 @@ write_out_data: bufs = 0; goto write_out_data; } - } - else { + } else if (!locked && buffer_locked(bh)) { + __jbd2_journal_file_buffer(jh, commit_transaction, + BJ_Locked); + jbd_unlock_bh_state(bh); + put_bh(bh); + } else { BUFFER_TRACE(bh, "writeout complete: unfile"); __jbd2_journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c60f378b0f76..44fc32bfd7f1 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -31,7 +31,7 @@ #include <linux/smp_lock.h> #include <linux/init.h> #include <linux/mm.h> -#include <linux/suspend.h> +#include <linux/freezer.h> #include <linux/pagemap.h> #include <linux/kthread.h> #include <linux/poison.h> @@ -1641,7 +1641,7 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry) #define JBD_MAX_SLABS 5 #define JBD_SLAB_INDEX(size) (size >> 11) -static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; +static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; static const char *jbd_slab_names[JBD_MAX_SLABS] = { "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k" }; @@ -1704,7 +1704,7 @@ void jbd2_slab_free(void *ptr, size_t size) /* * Journal_head storage management */ -static kmem_cache_t *jbd2_journal_head_cache; +static struct kmem_cache *jbd2_journal_head_cache; #ifdef CONFIG_JBD_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif @@ -2007,7 +2007,7 @@ static void __exit jbd2_remove_jbd_proc_entry(void) #endif -kmem_cache_t *jbd2_handle_cache; +struct kmem_cache *jbd2_handle_cache; static int __init journal_init_handle_cache(void) { diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index 380d19917f37..f506646ad0ff 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -70,8 +70,8 @@ #include <linux/init.h> #endif -static kmem_cache_t *jbd2_revoke_record_cache; -static kmem_cache_t *jbd2_revoke_table_cache; +static struct kmem_cache *jbd2_revoke_record_cache; +static struct kmem_cache *jbd2_revoke_table_cache; /* Each revoke record represents one single revoked block. 
During journal replay, this involves recording the transaction ID of the diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index c051a94c8a97..3a8700153cb0 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -27,6 +27,8 @@ #include <linux/mm.h> #include <linux/highmem.h> +static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); + /* * jbd2_get_transaction: obtain a new transaction_t object. * diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c index 3f7899ea4cba..9f15bce92022 100644 --- a/fs/jffs/inode-v23.c +++ b/fs/jffs/inode-v23.c @@ -61,8 +61,8 @@ static const struct file_operations jffs_dir_operations; static struct inode_operations jffs_dir_inode_operations; static const struct address_space_operations jffs_address_operations; -kmem_cache_t *node_cache = NULL; -kmem_cache_t *fm_cache = NULL; +struct kmem_cache *node_cache = NULL; +struct kmem_cache *fm_cache = NULL; /* Called by the VFS at mount time to initialize the whole file system. */ static int jffs_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index 4a543e114970..d0e783f199ea 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c @@ -66,6 +66,7 @@ #include <linux/smp_lock.h> #include <linux/time.h> #include <linux/ctype.h> +#include <linux/freezer.h> #include "intrep.h" #include "jffs_fm.h" @@ -591,7 +592,7 @@ jffs_add_virtual_root(struct jffs_control *c) D2(printk("jffs_add_virtual_root(): " "Creating a virtual root directory.\n")); - if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) { + if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) { return -ENOMEM; } no_jffs_file++; @@ -603,7 +604,6 @@ jffs_add_virtual_root(struct jffs_control *c) DJM(no_jffs_node++); memset(node, 0, sizeof(struct jffs_node)); node->ino = JFFS_MIN_INO; - memset(root, 0, sizeof(struct jffs_file)); root->ino = JFFS_MIN_INO; root->mode = S_IFDIR | S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c index 29b68d939bd9..077258b2103e 100644 --- a/fs/jffs/jffs_fm.c +++ b/fs/jffs/jffs_fm.c @@ -29,8 +29,8 @@ static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset); static struct jffs_fm *jffs_alloc_fm(void); static void jffs_free_fm(struct jffs_fm *n); -extern kmem_cache_t *fm_cache; -extern kmem_cache_t *node_cache; +extern struct kmem_cache *fm_cache; +extern struct kmem_cache *node_cache; #if CONFIG_JFFS_FS_VERBOSE > 0 void diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 0ae3cd10702c..73f0d60f73a5 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> +#include <linux/sched.h> #include <linux/time.h> #include <linux/crc32.h> #include <linux/jffs2.h> diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index ff2a872e80e7..6eb3daebd563 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -16,6 +16,7 @@ #include <linux/mtd/mtd.h> #include <linux/completion.h> #include <linux/sched.h> +#include <linux/freezer.h> #include "nodelist.h" diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 33f291005012..83f9881ec4cc 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c @@ -19,16 +19,16 @@ /* These are initialised to NULL in the kernel startup code. 
If you're porting to other operating systems, beware */ -static kmem_cache_t *full_dnode_slab; -static kmem_cache_t *raw_dirent_slab; -static kmem_cache_t *raw_inode_slab; -static kmem_cache_t *tmp_dnode_info_slab; -static kmem_cache_t *raw_node_ref_slab; -static kmem_cache_t *node_frag_slab; -static kmem_cache_t *inode_cache_slab; +static struct kmem_cache *full_dnode_slab; +static struct kmem_cache *raw_dirent_slab; +static struct kmem_cache *raw_inode_slab; +static struct kmem_cache *tmp_dnode_info_slab; +static struct kmem_cache *raw_node_ref_slab; +static struct kmem_cache *node_frag_slab; +static struct kmem_cache *inode_cache_slab; #ifdef CONFIG_JFFS2_FS_XATTR -static kmem_cache_t *xattr_datum_cache; -static kmem_cache_t *xattr_ref_cache; +static struct kmem_cache *xattr_datum_cache; +static struct kmem_cache *xattr_ref_cache; #endif int __init jffs2_create_slab_caches(void) diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index bc4b8106a490..7deb78254021 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -28,12 +28,12 @@ static void jffs2_put_super(struct super_block *); -static kmem_cache_t *jffs2_inode_cachep; +static struct kmem_cache *jffs2_inode_cachep; static struct inode *jffs2_alloc_inode(struct super_block *sb) { struct jffs2_inode_info *ei; - ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); + ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode) kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); } -static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index b9b700730dfe..70707309dfa1 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -19,6 +19,7 @@ #include <linux/crc32.h> #include <linux/mtd/nand.h> #include <linux/jiffies.h> +#include <linux/sched.h> #include "nodelist.h" diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c index 37db52488262..ed814b1ff4d9 100644 --- a/fs/jfs/ioctl.c +++ b/fs/jfs/ioctl.c @@ -9,6 +9,7 @@ #include <linux/ctype.h> #include <linux/capability.h> #include <linux/time.h> +#include <linux/sched.h> #include <asm/current.h> #include <asm/uaccess.h> diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h index 9901928668cf..eb550b339bb8 100644 --- a/fs/jfs/jfs_filsys.h +++ b/fs/jfs/jfs_filsys.h @@ -81,7 +81,7 @@ #define JFS_SWAP_BYTES 0x00100000 /* running on big endian computer */ /* Directory index */ -#define JFS_DIR_INDEX 0x00200000 /* Persistant index for */ +#define JFS_DIR_INDEX 0x00200000 /* Persistent index for */ /* directory entries */ diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index b89c9aba0466..5065baa530b6 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -67,7 +67,7 @@ #include <linux/kthread.h> #include <linux/buffer_head.h> /* for sync_blockdev() */ #include <linux/bio.h> -#include <linux/suspend.h> +#include <linux/freezer.h> #include <linux/delay.h> #include <linux/mutex.h> #include "jfs_incore.h" diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 0cccd1c39d75..b1a1c7296014 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -74,7 +74,7 @@ static inline void lock_metapage(struct metapage *mp) } #define METAPOOL_MIN_PAGES 32 -static kmem_cache_t *metapage_cache; 
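/*
 * The hunks around this point repeat one mechanical conversion across fs/:
 * the kmem_cache_t typedef is spelled out as struct kmem_cache, and slab
 * allocations take ordinary GFP flags (GFP_KERNEL, GFP_NOFS) in place of the
 * old SLAB_KERNEL/SLAB_NOFS aliases.  A minimal sketch of the resulting
 * pattern; foo_inode_info, foo_cachep and foo_alloc_inode are made-up names
 * used only for illustration and are not part of this merge:
 */
#include <linux/fs.h>
#include <linux/slab.h>

struct foo_inode_info {
	/* filesystem-private fields ... */
	struct inode vfs_inode;
};

static struct kmem_cache *foo_cachep;	/* was: static kmem_cache_t *foo_cachep; */

static struct inode *foo_alloc_inode(struct super_block *sb)
{
	struct foo_inode_info *ei;

	/* was: kmem_cache_alloc(foo_cachep, SLAB_KERNEL) */
	ei = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}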
+static struct kmem_cache *metapage_cache; static mempool_t *metapage_mempool; #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) @@ -180,7 +180,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp) #endif -static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct metapage *mp = (struct metapage *)foo; diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index 81f6f04af192..d558e51b0df8 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -46,7 +46,7 @@ #include <linux/vmalloc.h> #include <linux/smp_lock.h> #include <linux/completion.h> -#include <linux/suspend.h> +#include <linux/freezer.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kthread.h> diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 9c1c6e0e633d..846ac8f34513 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -44,7 +44,7 @@ MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); MODULE_LICENSE("GPL"); -static kmem_cache_t * jfs_inode_cachep; +static struct kmem_cache * jfs_inode_cachep; static struct super_operations jfs_super_operations; static struct export_operations jfs_export_operations; @@ -93,7 +93,7 @@ void jfs_error(struct super_block *sb, const char * function, ...) va_list args; va_start(args, function); - vsprintf(error_buf, function, args); + vsnprintf(error_buf, sizeof(error_buf), function, args); va_end(args); printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); @@ -748,7 +748,7 @@ static struct file_system_type jfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 3d84f600b633..497c3cd59d52 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -13,6 +13,7 @@ #include <linux/nfs_fs.h> #include <linux/utsname.h> #include <linux/smp_lock.h> +#include <linux/freezer.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> @@ -729,7 +730,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) goto retry_cancel; } - dprintk("lockd: cancel status %d (task %d)\n", + dprintk("lockd: cancel status %u (task %u)\n", req->a_res.status, task->tk_pid); switch (req->a_res.status) { diff --git a/fs/lockd/host.c b/fs/lockd/host.c index fb24a9730345..3d4610c2a266 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -36,34 +36,14 @@ static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(void); static struct nsm_handle * __nsm_find(const struct sockaddr_in *, const char *, int, int); - -/* - * Find an NLM server handle in the cache. If there is none, create it. - */ -struct nlm_host * -nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, - const char *hostname, int hostname_len) -{ - return nlm_lookup_host(0, sin, proto, version, - hostname, hostname_len); -} - -/* - * Find an NLM client handle in the cache. If there is none, create it. 
- */ -struct nlm_host * -nlmsvc_lookup_host(struct svc_rqst *rqstp, - const char *hostname, int hostname_len) -{ - return nlm_lookup_host(1, &rqstp->rq_addr, - rqstp->rq_prot, rqstp->rq_vers, - hostname, hostname_len); -} +static struct nsm_handle * nsm_find(const struct sockaddr_in *sin, + const char *hostname, + int hostname_len); /* * Common host lookup routine for server & client */ -struct nlm_host * +static struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *sin, int proto, int version, const char *hostname, @@ -195,6 +175,29 @@ nlm_destroy_host(struct nlm_host *host) } /* + * Find an NLM server handle in the cache. If there is none, create it. + */ +struct nlm_host * +nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version, + const char *hostname, int hostname_len) +{ + return nlm_lookup_host(0, sin, proto, version, + hostname, hostname_len); +} + +/* + * Find an NLM client handle in the cache. If there is none, create it. + */ +struct nlm_host * +nlmsvc_lookup_host(struct svc_rqst *rqstp, + const char *hostname, int hostname_len) +{ + return nlm_lookup_host(1, &rqstp->rq_addr, + rqstp->rq_prot, rqstp->rq_vers, + hostname, hostname_len); +} + +/* * Create the NLM RPC client for an NLM peer */ struct rpc_clnt * @@ -495,7 +498,7 @@ out: return nsm; } -struct nsm_handle * +static struct nsm_handle * nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len) { return __nsm_find(sin, hostname, hostname_len, 1); diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 634139232aaf..8ca18085e68d 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -353,9 +353,6 @@ EXPORT_SYMBOL(lockd_down); * Sysctl parameters (same as module parameters, different interface). */ -/* Something that isn't CTL_ANY, CTL_NONE or a value that may clash. */ -#define CTL_UNNUMBERED -2 - static ctl_table nlm_sysctls[] = { { .ctl_name = CTL_UNNUMBERED, diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 0ce5c81ff507..f67146a8199a 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -234,7 +234,7 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, */ static void nlm4svc_callback_exit(struct rpc_task *task, void *data) { - dprintk("lockd: %4d callback returned %d\n", task->tk_pid, + dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 32e99a6e8dca..3707c3a23e93 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -263,7 +263,7 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, */ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) { - dprintk("lockd: %4d callback returned %d\n", task->tk_pid, + dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } diff --git a/fs/locks.c b/fs/locks.c index e0b6a80649a0..1cb0c57fedbd 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -142,12 +142,12 @@ int lease_break_time = 45; static LIST_HEAD(file_lock_list); static LIST_HEAD(blocked_list); -static kmem_cache_t *filelock_cache __read_mostly; +static struct kmem_cache *filelock_cache __read_mostly; /* Allocate an empty lock structure. */ static struct file_lock *locks_alloc_lock(void) { - return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); + return kmem_cache_alloc(filelock_cache, GFP_KERNEL); } static void locks_release_private(struct file_lock *fl) @@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. 
*/ -static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) +static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) { struct file_lock *lock = (struct file_lock *) foo; diff --git a/fs/mbcache.c b/fs/mbcache.c index 0ff71256e65b..deeb9dc062d9 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -85,7 +85,7 @@ struct mb_cache { #ifndef MB_CACHE_INDEXES_COUNT int c_indexes_count; #endif - kmem_cache_t *c_entry_cache; + struct kmem_cache *c_entry_cache; struct list_head *c_block_hash; struct list_head *c_indexes_hash[0]; }; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 1e36bae4d0eb..629e09b38c5c 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -51,12 +51,12 @@ static void minix_put_super(struct super_block *sb) return; } -static kmem_cache_t * minix_inode_cachep; +static struct kmem_cache * minix_inode_cachep; static struct inode *minix_alloc_inode(struct super_block *sb) { struct minix_inode_info *ei; - ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL); + ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -67,7 +67,7 @@ static void minix_destroy_inode(struct inode *inode) kmem_cache_free(minix_inode_cachep, minix_i(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct minix_inode_info *ei = (struct minix_inode_info *) foo; diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c index b0f01b3b0536..452461955cbd 100644 --- a/fs/msdos/namei.c +++ b/fs/msdos/namei.c @@ -654,6 +654,7 @@ static struct inode_operations msdos_dir_inode_operations = { .rmdir = msdos_rmdir, .rename = msdos_rename, .setattr = fat_notify_change, + .getattr = fat_getattr, }; static int msdos_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/namei.c b/fs/namei.c index 28d49b301d55..db1bca26d88c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -249,9 +249,11 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) /* * MAY_EXEC on regular files requires special handling: We override - * filesystem execute permissions if the mode bits aren't set. + * filesystem execute permissions if the mode bits aren't set or + * the fs is mounted with the "noexec" flag. */ - if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) + if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) || + (nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC)))) return -EACCES; /* Ordinary permission routines do not understand MAY_APPEND. 
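/*
 * A short worked example for the permission() change in fs/namei.c above
 * (an illustrative reading of the new condition): MAY_EXEC on a regular
 * file now fails with -EACCES either when no execute bit is set in the mode
 * or when nd->mnt carries MNT_NOEXEC, so a 0755 binary on a mount made with
 * "noexec" is refused, where previously only the mode bits were consulted.
 */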
*/ @@ -1996,8 +1998,7 @@ asmlinkage long sys_mkdir(const char __user *pathname, int mode) void dentry_unhash(struct dentry *dentry) { dget(dentry); - if (atomic_read(&dentry->d_count)) - shrink_dcache_parent(dentry); + shrink_dcache_parent(dentry); spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) == 2) diff --git a/fs/namespace.c b/fs/namespace.c index 55442a6cf221..b00ac84ebbdd 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -36,7 +36,7 @@ static int event; static struct list_head *mount_hashtable __read_mostly; static int hash_mask __read_mostly, hash_bits __read_mostly; -static kmem_cache_t *mnt_cache __read_mostly; +static struct kmem_cache *mnt_cache __read_mostly; static struct rw_semaphore namespace_sem; /* /sys/fs */ diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 42e3bef270c9..fae53243bb92 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -40,12 +40,12 @@ static void ncp_delete_inode(struct inode *); static void ncp_put_super(struct super_block *); static int ncp_statfs(struct dentry *, struct kstatfs *); -static kmem_cache_t * ncp_inode_cachep; +static struct kmem_cache * ncp_inode_cachep; static struct inode *ncp_alloc_inode(struct super_block *sb) { struct ncp_inode_info *ei; - ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL); + ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -56,7 +56,7 @@ static void ncp_destroy_inode(struct inode *inode) kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; @@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->rcv.ptr = (unsigned char*)&server->rcv.buf; server->rcv.len = 10; server->rcv.state = 0; - INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); - INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); + INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); + INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); sock->sk->sk_write_space = ncp_tcp_write_space; } else { - INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); - INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); + INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); + INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); server->timeout_tm.data = (unsigned long)server; server->timeout_tm.function = ncpdgram_timeout_call; } diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 11c2b252ebed..e496d8b65e92 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void * } } -void ncpdgram_rcv_proc(void *s) +void ncpdgram_rcv_proc(struct work_struct *work) { - struct ncp_server *server = s; + struct ncp_server *server = + container_of(work, struct ncp_server, rcv.tq); struct socket* sock; sock = server->ncp_sock; @@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) } } -void ncpdgram_timeout_proc(void *s) +void ncpdgram_timeout_proc(struct work_struct *work) { - struct ncp_server *server = s; + struct ncp_server *server = + container_of(work, struct ncp_server, timeout_tq); mutex_lock(&server->rcv.creq_mutex); __ncpdgram_timeout_proc(server); mutex_unlock(&server->rcv.creq_mutex); @@ -652,18 +654,20 @@ skipdata:; } } -void 
ncp_tcp_rcv_proc(void *s) +void ncp_tcp_rcv_proc(struct work_struct *work) { - struct ncp_server *server = s; + struct ncp_server *server = + container_of(work, struct ncp_server, rcv.tq); mutex_lock(&server->rcv.creq_mutex); __ncptcp_rcv_proc(server); mutex_unlock(&server->rcv.creq_mutex); } -void ncp_tcp_tx_proc(void *s) +void ncp_tcp_tx_proc(struct work_struct *work) { - struct ncp_server *server = s; + struct ncp_server *server = + container_of(work, struct ncp_server, tx.tq); mutex_lock(&server->rcv.creq_mutex); __ncptcp_try_send(server); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 5fea638743e4..23ab145daa2d 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname, INIT_LIST_HEAD(&clp->cl_state_owners); INIT_LIST_HEAD(&clp->cl_unused); spin_lock_init(&clp->cl_lock); - INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); + INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); clp->cl_boot_time = CURRENT_TIME; clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index bdfabf854a51..f9d678f4ae06 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -58,7 +58,7 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static kmem_cache_t *nfs_direct_cachep; +static struct kmem_cache *nfs_direct_cachep; /* * This represents a set of asynchronous requests that we're waiting on @@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void) { struct nfs_direct_req *dreq; - dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL); + dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL); if (!dreq) return NULL; @@ -307,9 +307,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo data->task.tk_cookie = (unsigned long) inode; - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n", data->task.tk_pid, @@ -475,9 +473,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); } static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) @@ -641,9 +637,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l data->task.tk_priority = RPC_PRIORITY_NORMAL; data->task.tk_cookie = (unsigned long) inode; - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n", data->task.tk_pid, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc93865cea93..8e28bffc35a0 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -307,28 +307,28 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse static void nfs_invalidate_page(struct page *page, unsigned long offset) { - struct inode *inode = page->mapping->host; - + if (offset != 0) + return; /* Cancel any unstarted writes on this page */ - if (offset == 0) - nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE); + nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE); } static int nfs_release_page(struct page *page, gfp_t gfp) { - if (gfp & __GFP_FS) - return !nfs_wb_page(page->mapping->host, page); - else - /* - * Avoid deadlock on nfs_wait_on_request(). - */ + /* + * Avoid deadlock on nfs_wait_on_request(). 
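/*
 * The ncpfs and nfs hunks above follow the workqueue API conversion carried
 * by this merge: INIT_WORK() no longer takes a data pointer, handlers
 * receive the struct work_struct itself and recover their private object
 * with container_of(), and periodic work such as cl_renewd moves to
 * INIT_DELAYED_WORK()/struct delayed_work.  A minimal sketch of the idiom;
 * struct foo_server, foo_rcv_proc and foo_init are made-up names used only
 * for illustration:
 */
#include <linux/workqueue.h>

struct foo_server {
	struct {
		struct work_struct tq;
		/* receive state ... */
	} rcv;
};

/* was: void foo_rcv_proc(void *s) { struct foo_server *server = s; ... } */
static void foo_rcv_proc(struct work_struct *work)
{
	struct foo_server *server = container_of(work, struct foo_server, rcv.tq);

	/* process received data for 'server' ... */
	(void)server;
}

static void foo_init(struct foo_server *server)
{
	/* was: INIT_WORK(&server->rcv.tq, foo_rcv_proc, server); */
	INIT_WORK(&server->rcv.tq, foo_rcv_proc);
}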
+ */ + if (!(gfp & __GFP_FS)) return 0; + /* Hack... Force nfs_wb_page() to write out the page */ + SetPageDirty(page); + return !nfs_wb_page(page->mapping->host, page); } const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, .readpages = nfs_readpages, - .set_page_dirty = __set_page_dirty_nobuffers, + .set_page_dirty = nfs_set_page_dirty, .writepage = nfs_writepage, .writepages = nfs_writepages, .prepare_write = nfs_prepare_write, @@ -375,6 +375,12 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count); result = generic_file_aio_write(iocb, iov, nr_segs, pos); + /* Return error values for O_SYNC and IS_SYNC() */ + if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) { + int err = nfs_fsync(iocb->ki_filp, dentry, 1); + if (err < 0) + result = err; + } out: return result; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 08cc4c5919ab..36680d1061b0 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -55,7 +55,7 @@ static int nfs_update_inode(struct inode *, struct nfs_fattr *); static void nfs_zap_acl_cache(struct inode *); -static kmem_cache_t * nfs_inode_cachep; +static struct kmem_cache * nfs_inode_cachep; static inline unsigned long nfs_fattr_to_ino_t(struct nfs_fattr *fattr) @@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) int err; /* Flush out writes to the server in order to update c/mtime */ - nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT); + nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT); /* * We may force a getattr if the user cares about atime. @@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode) struct inode *nfs_alloc_inode(struct super_block *sb) { struct nfs_inode *nfsi; - nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); + nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); if (!nfsi) return NULL; nfsi->flags = 0UL; @@ -1111,7 +1111,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #endif } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct nfs_inode *nfsi = (struct nfs_inode *) foo; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d205466233f6..a28f6ce2e131 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -217,3 +217,21 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0) sb->s_maxbytes = MAX_LFS_FILESIZE; } + +/* + * Determine the number of bytes of data the page contains + */ +static inline +unsigned int nfs_page_length(struct page *page) +{ + loff_t i_size = i_size_read(page->mapping->host); + + if (i_size > 0) { + pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + if (page->index < end_index) + return PAGE_CACHE_SIZE; + if (page->index == end_index) + return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; + } + return 0; +} diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index ec1114b33d89..371b804e7cc8 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -18,10 +18,10 @@ #define NFSDBG_FACILITY NFSDBG_VFS -static void nfs_expire_automounts(void *list); +static void nfs_expire_automounts(struct work_struct *work); LIST_HEAD(nfs_automount_list); -static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); +static DECLARE_DELAYED_WORK(nfs_automount_task, 
nfs_expire_automounts); int nfs_mountpoint_expiry_timeout = 500 * HZ; static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, @@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = { .follow_link = nfs_follow_mountpoint, }; -static void nfs_expire_automounts(void *data) +static void nfs_expire_automounts(struct work_struct *work) { - struct list_head *list = (struct list_head *)data; + struct list_head *list = &nfs_automount_list; mark_mounts_for_expiry(list); if (!list_empty(list)) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index e5f128ffc32d..510ae524f3fd 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -276,51 +276,6 @@ static int nfs3_proc_read(struct nfs_read_data *rdata) return status; } -static int nfs3_proc_write(struct nfs_write_data *wdata) -{ - int rpcflags = wdata->flags; - struct inode * inode = wdata->inode; - struct nfs_fattr * fattr = wdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs3_procedures[NFS3PROC_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - dprintk("NFS reply write: %d\n", status); - return status < 0? status : wdata->res.count; -} - -static int nfs3_proc_commit(struct nfs_write_data *cdata) -{ - struct inode * inode = cdata->inode; - struct nfs_fattr * fattr = cdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT], - .rpc_argp = &cdata->args, - .rpc_resp = &cdata->res, - .rpc_cred = cdata->cred, - }; - int status; - - dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, - (long long) cdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - dprintk("NFS reply commit: %d\n", status); - return status; -} - /* * Create a regular file. * For now, we don't implement O_EXCL. @@ -369,7 +324,7 @@ again: /* If the server doesn't support the exclusive creation semantics, * try again with simple 'guarded' mode. 
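/*
 * Worked example for the nfs_page_length() helper added to fs/nfs/internal.h
 * in an earlier hunk (numbers are illustrative and assume 4096-byte pages):
 * with i_size = 10000, end_index = (10000 - 1) >> PAGE_CACHE_SHIFT = 2, so
 * pages 0 and 1 report a full PAGE_CACHE_SIZE of 4096 bytes, the last page
 * (index 2) reports ((10000 - 1) & ~PAGE_CACHE_MASK) + 1 = 1808 bytes, and
 * any higher index, or an i_size of 0, reports 0.
 */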
*/ - if (status == NFSERR_NOTSUPP) { + if (status == -ENOTSUPP) { switch (arg.createmode) { case NFS3_CREATE_EXCLUSIVE: arg.createmode = NFS3_CREATE_GUARDED; @@ -690,8 +645,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, }; int status; - lock_kernel(); - if (plus) msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS]; @@ -702,7 +655,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_refresh_inode(dir, &dir_attr); dprintk("NFS reply readdir: %d\n", status); - unlock_kernel(); return status; } @@ -904,8 +856,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = { .access = nfs3_proc_access, .readlink = nfs3_proc_readlink, .read = nfs3_proc_read, - .write = nfs3_proc_write, - .commit = nfs3_proc_commit, .create = nfs3_proc_create, .remove = nfs3_proc_remove, .unlink_setup = nfs3_proc_unlink_setup, diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 6f346677332d..c26cd978c7cc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2]; extern void nfs4_schedule_state_renewal(struct nfs_client *); extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); -extern void nfs4_renew_state(void *); +extern void nfs4_renew_state(struct work_struct *); /* nfs4state.c */ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8118036cc449..ee458aeab24a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -636,7 +636,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) smp_wmb(); } else status = data->rpc_status; - rpc_release_task(task); + rpc_put_task(task); return status; } @@ -742,7 +742,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) smp_wmb(); } else status = data->rpc_status; - rpc_release_task(task); + rpc_put_task(task); if (status != 0) return status; @@ -1775,89 +1775,6 @@ static int nfs4_proc_read(struct nfs_read_data *rdata) return err; } -static int _nfs4_proc_write(struct nfs_write_data *wdata) -{ - int rpcflags = wdata->flags; - struct inode *inode = wdata->inode; - struct nfs_fattr *fattr = wdata->res.fattr; - struct nfs_server *server = NFS_SERVER(inode); - struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - - wdata->args.bitmask = server->attr_bitmask; - wdata->res.server = server; - wdata->timestamp = jiffies; - nfs_fattr_init(fattr); - status = rpc_call_sync(server->client, &msg, rpcflags); - dprintk("NFS reply write: %d\n", status); - if (status < 0) - return status; - renew_lease(server, wdata->timestamp); - nfs_post_op_update_inode(inode, fattr); - return wdata->res.count; -} - -static int nfs4_proc_write(struct nfs_write_data *wdata) -{ - struct nfs4_exception exception = { }; - int err; - do { - err = nfs4_handle_exception(NFS_SERVER(wdata->inode), - _nfs4_proc_write(wdata), - &exception); - } while (exception.retry); - return err; -} - -static int _nfs4_proc_commit(struct nfs_write_data *cdata) -{ - struct inode *inode = cdata->inode; - struct nfs_fattr *fattr = cdata->res.fattr; - struct nfs_server *server = NFS_SERVER(inode); - struct rpc_message msg = { - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], - .rpc_argp = &cdata->args, - .rpc_resp = 
&cdata->res, - .rpc_cred = cdata->cred, - }; - int status; - - dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, - (long long) cdata->args.offset); - - cdata->args.bitmask = server->attr_bitmask; - cdata->res.server = server; - cdata->timestamp = jiffies; - nfs_fattr_init(fattr); - status = rpc_call_sync(server->client, &msg, 0); - if (status >= 0) - renew_lease(server, cdata->timestamp); - dprintk("NFS reply commit: %d\n", status); - if (status >= 0) - nfs_post_op_update_inode(inode, fattr); - return status; -} - -static int nfs4_proc_commit(struct nfs_write_data *cdata) -{ - struct nfs4_exception exception = { }; - int err; - do { - err = nfs4_handle_exception(NFS_SERVER(cdata->inode), - _nfs4_proc_commit(cdata), - &exception); - } while (exception.retry); - return err; -} - /* * Got race? * We will need to arrange for the VFS layer to provide an atomic open. @@ -2223,13 +2140,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); - lock_kernel(); nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); res.pgbase = args.pgbase; status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (status == 0) memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); - unlock_kernel(); dprintk("%s: returns %d\n", __FUNCTION__, status); return status; } @@ -3067,7 +2982,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co if (status == 0) nfs_post_op_update_inode(inode, &data->fattr); } - rpc_release_task(task); + rpc_put_task(task); return status; } @@ -3314,7 +3229,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * if (IS_ERR(task)) goto out; status = nfs4_wait_for_completion_rpc_task(task); - rpc_release_task(task); + rpc_put_task(task); out: return status; } @@ -3430,7 +3345,7 @@ static void nfs4_lock_release(void *calldata) task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, data->arg.lock_seqid); if (!IS_ERR(task)) - rpc_release_task(task); + rpc_put_task(task); dprintk("%s: cancelling lock!\n", __FUNCTION__); } else nfs_free_seqid(data->arg.lock_seqid); @@ -3472,7 +3387,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f ret = -EAGAIN; } else data->cancelled = 1; - rpc_release_task(task); + rpc_put_task(task); dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret); return ret; } @@ -3732,8 +3647,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .access = nfs4_proc_access, .readlink = nfs4_proc_readlink, .read = nfs4_proc_read, - .write = nfs4_proc_write, - .commit = nfs4_proc_commit, .create = nfs4_proc_create, .remove = nfs4_proc_remove, .unlink_setup = nfs4_proc_unlink_setup, diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 7b6df1852e75..823298561c0a 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -59,9 +59,10 @@ #define NFSDBG_FACILITY NFSDBG_PROC void -nfs4_renew_state(void *data) +nfs4_renew_state(struct work_struct *work) { - struct nfs_client *clp = (struct nfs_client *)data; + struct nfs_client *clp = + container_of(work, struct nfs_client, cl_renewd.work); struct rpc_cred *cred; long lease, timeout; unsigned long last, now; diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 8dfefe41a8da..75f819dc0255 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -98,7 +98,7 @@ static char nfs_root_name[256] __initdata = ""; /* Address of NFS server */ -static __u32 servaddr __initdata = 0; +static __be32 servaddr __initdata = 0; /* Name 
of directory to mount */ static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, }; @@ -327,7 +327,7 @@ static int __init root_nfs_name(char *name) */ static int __init root_nfs_addr(void) { - if ((servaddr = root_server_addr) == INADDR_NONE) { + if ((servaddr = root_server_addr) == htonl(INADDR_NONE)) { printk(KERN_ERR "Root-NFS: No NFS server available, giving up.\n"); return -1; } @@ -411,7 +411,7 @@ __setup("nfsroot=", nfs_root_setup); * Construct sockaddr_in from address and port number. */ static inline void -set_sockaddr(struct sockaddr_in *sin, __u32 addr, __u16 port) +set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = addr; @@ -468,14 +468,13 @@ static int __init root_nfs_ports(void) dprintk("Root-NFS: Portmapper on server returned %d " "as nfsd port\n", port); } - nfs_port = htons(nfs_port); if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) { printk(KERN_ERR "Root-NFS: Unable to get mountd port " "number from server, using default\n"); port = mountd_port; } - mount_port = htons(port); + mount_port = port; dprintk("Root-NFS: mountd port is %d\n", port); return 0; @@ -496,7 +495,7 @@ static int __init root_nfs_get_handle(void) int version = (nfs_data.flags & NFS_MOUNT_VER3) ? NFS_MNT3_VERSION : NFS_MNT_VERSION; - set_sockaddr(&sin, servaddr, mount_port); + set_sockaddr(&sin, servaddr, htons(mount_port)); status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol); if (status < 0) printk(KERN_ERR "Root-NFS: Server returned error %d " @@ -519,6 +518,6 @@ void * __init nfs_root_data(void) || root_nfs_ports() < 0 || root_nfs_get_handle() < 0) return NULL; - set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, nfs_port); + set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, htons(nfs_port)); return (void*)&nfs_data; } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 829af323f288..ca4b1d4ff42b 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -17,16 +17,17 @@ #include <linux/nfs_page.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> +#include <linux/writeback.h> #define NFS_PARANOIA 1 -static kmem_cache_t *nfs_page_cachep; +static struct kmem_cache *nfs_page_cachep; static inline struct nfs_page * nfs_page_alloc(void) { struct nfs_page *p; - p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); + p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->wb_list); @@ -268,11 +269,10 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, #define NFS_SCAN_MAXENTRIES 16 /** - * nfs_scan_lock_dirty - Scan the radix tree for dirty requests - * @nfsi: NFS inode + * nfs_scan_dirty - Scan the radix tree for dirty requests + * @mapping: pointer to address space + * @wbc: writeback_control structure * @dst: Destination list - * @idx_start: lower bound of page->index to scan - * @npages: idx_start + npages sets the upper bound to scan. * * Moves elements from one of the inode request lists. * If the number of requests is set to 0, the entire address_space @@ -280,46 +280,63 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, * The requests are *not* checked to ensure that they form a contiguous set. 
* You must be holding the inode's req_lock when calling this function */ -int -nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst, - unsigned long idx_start, unsigned int npages) +long nfs_scan_dirty(struct address_space *mapping, + struct writeback_control *wbc, + struct list_head *dst) { + struct nfs_inode *nfsi = NFS_I(mapping->host); struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; struct nfs_page *req; - unsigned long idx_end; + pgoff_t idx_start, idx_end; + long res = 0; int found, i; - int res; - res = 0; - if (npages == 0) - idx_end = ~0; - else - idx_end = idx_start + npages - 1; + if (nfsi->ndirty == 0) + return 0; + if (wbc->range_cyclic) { + idx_start = 0; + idx_end = ULONG_MAX; + } else if (wbc->range_end == 0) { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = ULONG_MAX; + } else { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; + } for (;;) { + unsigned int toscan = NFS_SCAN_MAXENTRIES; + found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, - (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES, + (void **)&pgvec[0], idx_start, toscan, NFS_PAGE_TAG_DIRTY); + + /* Did we make progress? */ if (found <= 0) break; + for (i = 0; i < found; i++) { req = pgvec[i]; - if (req->wb_index > idx_end) + if (!wbc->range_cyclic && req->wb_index > idx_end) goto out; + /* Try to lock request and mark it for writeback */ + if (!nfs_set_page_writeback_locked(req)) + goto next; + radix_tree_tag_clear(&nfsi->nfs_page_tree, + req->wb_index, NFS_PAGE_TAG_DIRTY); + nfsi->ndirty--; + nfs_list_remove_request(req); + nfs_list_add_request(req, dst); + res++; + if (res == LONG_MAX) + goto out; +next: idx_start = req->wb_index + 1; - - if (nfs_set_page_writeback_locked(req)) { - radix_tree_tag_clear(&nfsi->nfs_page_tree, - req->wb_index, NFS_PAGE_TAG_DIRTY); - nfs_list_remove_request(req); - nfs_list_add_request(req, dst); - dec_zone_page_state(req->wb_page, NR_FILE_DIRTY); - res++; - } } } out: + WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)); return res; } diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 4529cc4f3f8f..10f5e80ca157 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -215,32 +215,6 @@ static int nfs_proc_read(struct nfs_read_data *rdata) return status; } -static int nfs_proc_write(struct nfs_write_data *wdata) -{ - int flags = wdata->flags; - struct inode * inode = wdata->inode; - struct nfs_fattr * fattr = wdata->res.fattr; - struct rpc_message msg = { - .rpc_proc = &nfs_procedures[NFSPROC_WRITE], - .rpc_argp = &wdata->args, - .rpc_resp = &wdata->res, - .rpc_cred = wdata->cred, - }; - int status; - - dprintk("NFS call write %d @ %Ld\n", wdata->args.count, - (long long) wdata->args.offset); - nfs_fattr_init(fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); - if (status >= 0) { - nfs_post_op_update_inode(inode, fattr); - wdata->res.count = wdata->args.count; - wdata->verf.committed = NFS_FILE_SYNC; - } - dprintk("NFS reply write: %d\n", status); - return status < 0? 
status : wdata->res.count; -} - static int nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags, struct nameidata *nd) @@ -545,13 +519,10 @@ nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, }; int status; - lock_kernel(); - dprintk("NFS call readdir %d\n", (unsigned int)cookie); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); dprintk("NFS reply readdir: %d\n", status); - unlock_kernel(); return status; } @@ -696,8 +667,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .access = NULL, /* access */ .readlink = nfs_proc_readlink, .read = nfs_proc_read, - .write = nfs_proc_write, - .commit = NULL, /* commit */ .create = nfs_proc_create, .remove = nfs_proc_remove, .unlink_setup = nfs_proc_unlink_setup, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index c2e49c397a27..a9c26521a9e2 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -30,6 +30,7 @@ #include <asm/system.h> +#include "internal.h" #include "iostat.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE @@ -38,7 +39,7 @@ static int nfs_pagein_one(struct list_head *, struct inode *); static const struct rpc_call_ops nfs_read_partial_ops; static const struct rpc_call_ops nfs_read_full_ops; -static kmem_cache_t *nfs_rdata_cachep; +static struct kmem_cache *nfs_rdata_cachep; static mempool_t *nfs_rdata_mempool; #define MIN_POOL_READ (32) @@ -46,7 +47,7 @@ static mempool_t *nfs_rdata_mempool; struct nfs_read_data *nfs_readdata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); + struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -65,32 +66,22 @@ struct nfs_read_data *nfs_readdata_alloc(size_t len) return p; } -static void nfs_readdata_free(struct nfs_read_data *p) +static void nfs_readdata_rcu_free(struct rcu_head *head) { + struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_rdata_mempool); } -void nfs_readdata_release(void *data) +static void nfs_readdata_free(struct nfs_read_data *rdata) { - nfs_readdata_free(data); + call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free); } -static -unsigned int nfs_page_length(struct inode *inode, struct page *page) +void nfs_readdata_release(void *data) { - loff_t i_size = i_size_read(inode); - unsigned long idx; - - if (i_size <= 0) - return 0; - idx = (i_size - 1) >> PAGE_CACHE_SHIFT; - if (page->index > idx) - return 0; - if (page->index != idx) - return PAGE_CACHE_SIZE; - return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1)); + nfs_readdata_free(data); } static @@ -139,12 +130,12 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, { unsigned int rsize = NFS_SERVER(inode)->rsize; unsigned int count = PAGE_CACHE_SIZE; - int result; + int result = -ENOMEM; struct nfs_read_data *rdata; rdata = nfs_readdata_alloc(count); if (!rdata) - return -ENOMEM; + goto out_unlock; memset(rdata, 0, sizeof(*rdata)); rdata->flags = (IS_SWAPFILE(inode)? 
NFS_RPC_SWAPFLAGS : 0); @@ -212,8 +203,9 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode, result = 0; io_error: - unlock_page(page); nfs_readdata_free(rdata); +out_unlock: + unlock_page(page); return result; } @@ -224,7 +216,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, struct nfs_page *new; unsigned int len; - len = nfs_page_length(inode, page); + len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(ctx, inode, page, 0, len); @@ -316,9 +308,7 @@ static void nfs_execute_read(struct nfs_read_data *data) sigset_t oldset; rpc_clnt_sigmask(clnt, &oldset); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); rpc_clnt_sigunmask(clnt, &oldset); } @@ -455,6 +445,55 @@ nfs_pagein_list(struct list_head *head, int rpages) } /* + * This is the callback from RPC telling us whether a reply was + * received or some error occurred (timeout or socket shutdown). + */ +int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) +{ + int status; + + dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid, + task->tk_status); + + status = NFS_PROTO(data->inode)->read_done(task, data); + if (status != 0) + return status; + + nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count); + + if (task->tk_status == -ESTALE) { + set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode)); + nfs_mark_for_revalidate(data->inode); + } + spin_lock(&data->inode->i_lock); + NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME; + spin_unlock(&data->inode->i_lock); + return 0; +} + +static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data) +{ + struct nfs_readargs *argp = &data->args; + struct nfs_readres *resp = &data->res; + + if (resp->eof || resp->count == argp->count) + return 0; + + /* This is a short read! */ + nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); + /* Has the server at least made some progress? */ + if (resp->count == 0) + return 0; + + /* Yes, so retry the read at the end of the data */ + argp->offset += resp->count; + argp->pgbase += resp->count; + argp->count -= resp->count; + rpc_restart_call(task); + return -EAGAIN; +} + +/* * Handle a read reply that fills part of a page. */ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata) @@ -463,12 +502,16 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata) struct nfs_page *req = data->req; struct page *page = req->wb_page; - if (likely(task->tk_status >= 0)) - nfs_readpage_truncate_uninitialised_page(data); - else - SetPageError(page); if (nfs_readpage_result(task, data) != 0) return; + + if (likely(task->tk_status >= 0)) { + nfs_readpage_truncate_uninitialised_page(data); + if (nfs_readpage_retry(task, data) != 0) + return; + } + if (unlikely(task->tk_status < 0)) + SetPageError(page); if (atomic_dec_and_test(&req->wb_complete)) { if (!PageError(page)) SetPageUptodate(page); @@ -496,25 +539,13 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageUptodate(*pages); - if (count != 0) + if (count == 0) + return; + /* Was this a short read? 
*/ + if (data->res.eof || data->res.count == data->args.count) SetPageUptodate(*pages); } -static void nfs_readpage_set_pages_error(struct nfs_read_data *data) -{ - unsigned int count = data->args.count; - unsigned int base = data->args.pgbase; - struct page **pages; - - pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; - base &= ~PAGE_CACHE_MASK; - count += base; - for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) - SetPageError(*pages); - if (count != 0) - SetPageError(*pages); -} - /* * This is the callback from RPC telling us whether a reply was * received or some error occurred (timeout or socket shutdown). @@ -523,19 +554,20 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata) { struct nfs_read_data *data = calldata; + if (nfs_readpage_result(task, data) != 0) + return; /* - * Note: nfs_readpage_result may change the values of + * Note: nfs_readpage_retry may change the values of * data->args. In the multi-page case, we therefore need - * to ensure that we call the next nfs_readpage_set_page_uptodate() - * first in the multi-page case. + * to ensure that we call nfs_readpage_set_pages_uptodate() + * first. */ if (likely(task->tk_status >= 0)) { nfs_readpage_truncate_uninitialised_page(data); nfs_readpage_set_pages_uptodate(data); - } else - nfs_readpage_set_pages_error(data); - if (nfs_readpage_result(task, data) != 0) - return; + if (nfs_readpage_retry(task, data) != 0) + return; + } while (!list_empty(&data->pages)) { struct nfs_page *req = nfs_list_entry(data->pages.next); @@ -550,50 +582,6 @@ static const struct rpc_call_ops nfs_read_full_ops = { }; /* - * This is the callback from RPC telling us whether a reply was - * received or some error occurred (timeout or socket shutdown). - */ -int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) -{ - struct nfs_readargs *argp = &data->args; - struct nfs_readres *resp = &data->res; - int status; - - dprintk("NFS: %4d nfs_readpage_result, (status %d)\n", - task->tk_pid, task->tk_status); - - status = NFS_PROTO(data->inode)->read_done(task, data); - if (status != 0) - return status; - - nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count); - - if (task->tk_status < 0) { - if (task->tk_status == -ESTALE) { - set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode)); - nfs_mark_for_revalidate(data->inode); - } - } else if (resp->count < argp->count && !resp->eof) { - /* This is a short read! */ - nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); - /* Has the server at least made some progress? */ - if (resp->count != 0) { - /* Yes, so retry the read at the end of the data */ - argp->offset += resp->count; - argp->pgbase += resp->count; - argp->count -= resp->count; - rpc_restart_call(task); - return -EAGAIN; - } - task->tk_status = -EIO; - } - spin_lock(&data->inode->i_lock); - NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME; - spin_unlock(&data->inode->i_lock); - return 0; -} - -/* * Read a page over NFS. * We read the page synchronously in the following case: * - The error flag is set for this page. 
This happens only when a @@ -626,9 +614,10 @@ int nfs_readpage(struct file *file, struct page *page) goto out_error; if (file == NULL) { + error = -EBADF; ctx = nfs_find_open_context(inode, NULL, FMODE_READ); if (ctx == NULL) - return -EBADF; + goto out_error; } else ctx = get_nfs_open_context((struct nfs_open_context *) file->private_data); @@ -663,7 +652,7 @@ readpage_async_filler(void *data, struct page *page) unsigned int len; nfs_wb_page(inode, page); - len = nfs_page_length(inode, page); + len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); new = nfs_create_request(desc->ctx, inode, page, 0, len); diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 600bbe630abd..6c686112cc03 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -33,9 +33,7 @@ static int nfs_symlink_filler(struct inode *inode, struct page *page) { int error; - lock_kernel(); error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); - unlock_kernel(); if (error < 0) goto error; SetPageUptodate(page); diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 2fe3403c2409..3ea50ac64820 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -18,11 +18,6 @@ static const int nfs_set_port_min = 0; static const int nfs_set_port_max = 65535; static struct ctl_table_header *nfs_callback_sysctl_table; -/* - * Something that isn't CTL_ANY, CTL_NONE or a value that may clash. - * Use the same values as fs/lockd/svc.c - */ -#define CTL_UNNUMBERED -2 static ctl_table nfs_cb_sysctls[] = { #ifdef CONFIG_NFS_V4 diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 883dd4a1c157..594eb16879ef 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -63,6 +63,7 @@ #include <linux/smp_lock.h> #include "delegation.h" +#include "internal.h" #include "iostat.h" #define NFSDBG_FACILITY NFSDBG_PAGECACHE @@ -74,18 +75,17 @@ * Local function declarations */ static struct nfs_page * nfs_update_request(struct nfs_open_context*, - struct inode *, struct page *, unsigned int, unsigned int); +static void nfs_mark_request_dirty(struct nfs_page *req); static int nfs_wait_on_write_congestion(struct address_space *, int); static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int); -static int nfs_flush_inode(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how); +static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); static const struct rpc_call_ops nfs_write_partial_ops; static const struct rpc_call_ops nfs_write_full_ops; static const struct rpc_call_ops nfs_commit_ops; -static kmem_cache_t *nfs_wdata_cachep; +static struct kmem_cache *nfs_wdata_cachep; static mempool_t *nfs_wdata_mempool; static mempool_t *nfs_commit_mempool; @@ -93,7 +93,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion); struct nfs_write_data *nfs_commit_alloc(void) { - struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -102,17 +102,23 @@ struct nfs_write_data *nfs_commit_alloc(void) return p; } -void nfs_commit_free(struct nfs_write_data *p) +void nfs_commit_rcu_free(struct rcu_head *head) { + struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_commit_mempool); } +void nfs_commit_free(struct nfs_write_data *wdata) +{ + call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); +} + struct nfs_write_data 
*nfs_writedata_alloc(size_t len) { unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; - struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); + struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); if (p) { memset(p, 0, sizeof(*p)); @@ -131,18 +137,47 @@ struct nfs_write_data *nfs_writedata_alloc(size_t len) return p; } -static void nfs_writedata_free(struct nfs_write_data *p) +static void nfs_writedata_rcu_free(struct rcu_head *head) { + struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); mempool_free(p, nfs_wdata_mempool); } +static void nfs_writedata_free(struct nfs_write_data *wdata) +{ + call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free); +} + void nfs_writedata_release(void *wdata) { nfs_writedata_free(wdata); } +static struct nfs_page *nfs_page_find_request_locked(struct page *page) +{ + struct nfs_page *req = NULL; + + if (PagePrivate(page)) { + req = (struct nfs_page *)page_private(page); + if (req != NULL) + atomic_inc(&req->wb_count); + } + return req; +} + +static struct nfs_page *nfs_page_find_request(struct page *page) +{ + struct nfs_page *req = NULL; + spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; + + spin_lock(req_lock); + req = nfs_page_find_request_locked(page); + spin_unlock(req_lock); + return req; +} + /* Adjust the file length if we're writing beyond the end */ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) { @@ -164,113 +199,34 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c */ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count) { - loff_t end_offs; - if (PageUptodate(page)) return; if (base != 0) return; - if (count == PAGE_CACHE_SIZE) { - SetPageUptodate(page); - return; - } - - end_offs = i_size_read(page->mapping->host) - 1; - if (end_offs < 0) + if (count != nfs_page_length(page)) return; - /* Is this the last page? */ - if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT)) - return; - /* This is the last page: set PG_uptodate if we cover the entire - * extent of the data, then zero the rest of the page. - */ - if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) { + if (count != PAGE_CACHE_SIZE) memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); - SetPageUptodate(page); - } + SetPageUptodate(page); } -/* - * Write a page synchronously. - * Offset is the data offset within the page. 
- */ -static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode, - struct page *page, unsigned int offset, unsigned int count, - int how) -{ - unsigned int wsize = NFS_SERVER(inode)->wsize; - int result, written = 0; - struct nfs_write_data *wdata; - - wdata = nfs_writedata_alloc(wsize); - if (!wdata) - return -ENOMEM; - - wdata->flags = how; - wdata->cred = ctx->cred; - wdata->inode = inode; - wdata->args.fh = NFS_FH(inode); - wdata->args.context = ctx; - wdata->args.pages = &page; - wdata->args.stable = NFS_FILE_SYNC; - wdata->args.pgbase = offset; - wdata->args.count = wsize; - wdata->res.fattr = &wdata->fattr; - wdata->res.verf = &wdata->verf; - - dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n", - inode->i_sb->s_id, - (long long)NFS_FILEID(inode), - count, (long long)(page_offset(page) + offset)); - - set_page_writeback(page); - nfs_begin_data_update(inode); - do { - if (count < wsize) - wdata->args.count = count; - wdata->args.offset = page_offset(page) + wdata->args.pgbase; - - result = NFS_PROTO(inode)->write(wdata); - - if (result < 0) { - /* Must mark the page invalid after I/O error */ - ClearPageUptodate(page); - goto io_error; - } - if (result < wdata->args.count) - printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n", - wdata->args.count, result); - - wdata->args.offset += result; - wdata->args.pgbase += result; - written += result; - count -= result; - nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result); - } while (count); - /* Update file length */ - nfs_grow_file(page, offset, written); - /* Set the PG_uptodate flag? */ - nfs_mark_uptodate(page, offset, written); - - if (PageError(page)) - ClearPageError(page); - -io_error: - nfs_end_data_update(inode); - end_page_writeback(page); - nfs_writedata_free(wdata); - return written ? written : result; -} - -static int nfs_writepage_async(struct nfs_open_context *ctx, - struct inode *inode, struct page *page, +static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, unsigned int offset, unsigned int count) { struct nfs_page *req; + int ret; - req = nfs_update_request(ctx, inode, page, offset, count); - if (IS_ERR(req)) - return PTR_ERR(req); + for (;;) { + req = nfs_update_request(ctx, page, offset, count); + if (!IS_ERR(req)) + break; + ret = PTR_ERR(req); + if (ret != -EBUSY) + return ret; + ret = nfs_wb_page(page->mapping->host, page); + if (ret != 0) + return ret; + } /* Update file length */ nfs_grow_file(page, offset, count); /* Set the PG_uptodate flag? */ @@ -289,73 +245,94 @@ static int wb_priority(struct writeback_control *wbc) } /* + * Find an associated nfs write request, and prepare to flush it out + * Returns 1 if there was no write request, or if the request was + * already tagged by nfs_set_page_dirty.Returns 0 if the request + * was not tagged. + * May also return an error if the user signalled nfs_wait_on_request(). + */ +static int nfs_page_mark_flush(struct page *page) +{ + struct nfs_page *req; + spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; + int ret; + + spin_lock(req_lock); + for(;;) { + req = nfs_page_find_request_locked(page); + if (req == NULL) { + spin_unlock(req_lock); + return 1; + } + if (nfs_lock_request_dontget(req)) + break; + /* Note: If we hold the page lock, as is the case in nfs_writepage, + * then the call to nfs_lock_request_dontget() will always + * succeed provided that someone hasn't already marked the + * request as dirty (in which case we don't care). 
+ */ + spin_unlock(req_lock); + ret = nfs_wait_on_request(req); + nfs_release_request(req); + if (ret != 0) + return ret; + spin_lock(req_lock); + } + spin_unlock(req_lock); + if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) { + nfs_mark_request_dirty(req); + set_page_writeback(page); + } + ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); + nfs_unlock_request(req); + return ret; +} + +/* * Write an mmapped page to the server. */ -int nfs_writepage(struct page *page, struct writeback_control *wbc) +static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct nfs_open_context *ctx; struct inode *inode = page->mapping->host; - unsigned long end_index; - unsigned offset = PAGE_CACHE_SIZE; - loff_t i_size = i_size_read(inode); - int inode_referenced = 0; - int priority = wb_priority(wbc); + unsigned offset; int err; nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - /* - * Note: We need to ensure that we have a reference to the inode - * if we are to do asynchronous writes. If not, waiting - * in nfs_wait_on_request() may deadlock with clear_inode(). - * - * If igrab() fails here, then it is in any case safe to - * call nfs_wb_page(), since there will be no pending writes. - */ - if (igrab(inode) != 0) - inode_referenced = 1; - end_index = i_size >> PAGE_CACHE_SHIFT; - - /* Ensure we've flushed out any previous writes */ - nfs_wb_page_priority(inode, page, priority); - - /* easy case */ - if (page->index < end_index) - goto do_it; - /* things got complicated... */ - offset = i_size & (PAGE_CACHE_SIZE-1); - - /* OK, are we completely out? */ - err = 0; /* potential race with truncate - ignore */ - if (page->index >= end_index+1 || !offset) + err = nfs_page_mark_flush(page); + if (err <= 0) + goto out; + err = 0; + offset = nfs_page_length(page); + if (!offset) goto out; -do_it: + ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE); if (ctx == NULL) { err = -EBADF; goto out; } - lock_kernel(); - if (!IS_SYNC(inode) && inode_referenced) { - err = nfs_writepage_async(ctx, inode, page, 0, offset); - if (!wbc->for_writepages) - nfs_flush_inode(inode, 0, 0, wb_priority(wbc)); - } else { - err = nfs_writepage_sync(ctx, inode, page, 0, - offset, priority); - if (err >= 0) { - if (err != offset) - redirty_page_for_writepage(wbc, page); - err = 0; - } - } - unlock_kernel(); + err = nfs_writepage_setup(ctx, page, 0, offset); put_nfs_open_context(ctx); + if (err != 0) + goto out; + err = nfs_page_mark_flush(page); + if (err > 0) + err = 0; out: + if (!wbc->for_writepages) + nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc)); + return err; +} + +int nfs_writepage(struct page *page, struct writeback_control *wbc) +{ + int err; + + err = nfs_writepage_locked(page, wbc); unlock_page(page); - if (inode_referenced) - iput(inode); return err; } @@ -379,21 +356,18 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) return 0; nfs_wait_on_write_congestion(mapping, 0); } - err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc)); + err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); if (err < 0) goto out; nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); - wbc->nr_to_write -= err; if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) { err = nfs_wait_on_requests(inode, 0, 0); if (err < 0) goto out; } err = nfs_commit_inode(inode, wb_priority(wbc)); - if (err > 0) { - wbc->nr_to_write -= err; + if (err > 0) err = 0; - } out: clear_bit(BDI_write_congested, &bdi->state); wake_up_all(&nfs_write_congestion); @@ 
-420,6 +394,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) nfsi->change_attr++; } SetPagePrivate(req->wb_page); + set_page_private(req->wb_page, (unsigned long)req); nfsi->npages++; atomic_inc(&req->wb_count); return 0; @@ -436,6 +411,7 @@ static void nfs_inode_remove_request(struct nfs_page *req) BUG_ON (!NFS_WBACK_BUSY(req)); spin_lock(&nfsi->req_lock); + set_page_private(req->wb_page, 0); ClearPagePrivate(req->wb_page); radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); nfsi->npages--; @@ -450,33 +426,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) } /* - * Find a request - */ -static inline struct nfs_page * -_nfs_find_request(struct inode *inode, unsigned long index) -{ - struct nfs_inode *nfsi = NFS_I(inode); - struct nfs_page *req; - - req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index); - if (req) - atomic_inc(&req->wb_count); - return req; -} - -static struct nfs_page * -nfs_find_request(struct inode *inode, unsigned long index) -{ - struct nfs_page *req; - struct nfs_inode *nfsi = NFS_I(inode); - - spin_lock(&nfsi->req_lock); - req = _nfs_find_request(inode, index); - spin_unlock(&nfsi->req_lock); - return req; -} - -/* * Add a request to the inode's dirty list. */ static void @@ -491,8 +440,14 @@ nfs_mark_request_dirty(struct nfs_page *req) nfs_list_add_request(req, &nfsi->dirty); nfsi->ndirty++; spin_unlock(&nfsi->req_lock); - inc_zone_page_state(req->wb_page, NR_FILE_DIRTY); - mark_inode_dirty(inode); + __mark_inode_dirty(inode, I_DIRTY_PAGES); +} + +static void +nfs_redirty_request(struct nfs_page *req) +{ + clear_bit(PG_FLUSHING, &req->wb_flags); + __set_page_dirty_nobuffers(req->wb_page); } /* @@ -501,8 +456,7 @@ nfs_mark_request_dirty(struct nfs_page *req) static inline int nfs_dirty_request(struct nfs_page *req) { - struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); - return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty; + return test_bit(PG_FLUSHING, &req->wb_flags) == 0; } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -520,7 +474,7 @@ nfs_mark_request_commit(struct nfs_page *req) nfsi->ncommit++; spin_unlock(&nfsi->req_lock); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - mark_inode_dirty(inode); + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } #endif @@ -597,31 +551,6 @@ static void nfs_cancel_commit_list(struct list_head *head) } } -/* - * nfs_scan_dirty - Scan an inode for dirty requests - * @inode: NFS inode to scan - * @dst: destination list - * @idx_start: lower bound of page->index to scan. - * @npages: idx_start + npages sets the upper bound to scan. - * - * Moves requests from the inode's dirty page list. - * The requests are *not* checked to ensure that they form a contiguous set. - */ -static int -nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) -{ - struct nfs_inode *nfsi = NFS_I(inode); - int res = 0; - - if (nfsi->ndirty != 0) { - res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages); - nfsi->ndirty -= res; - if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)) - printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n"); - } - return res; -} - #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) /* * nfs_scan_commit - Scan an inode for commit requests @@ -698,27 +627,27 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr) * Note: Should always be called with the Page Lock held! 
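The set_page_private() additions in nfs_inode_add_request() and nfs_inode_remove_request() above pair with the new nfs_page_find_request_locked(): the write request is now stashed in page->private instead of being looked up through the per-inode radix tree (the removed _nfs_find_request()). The convention, condensed into one place with illustrative helper names (the caller is assumed to hold the inode's req_lock):

#include <linux/mm.h>
#include <linux/nfs_page.h>

/* Attach a request to its page. */
static void example_attach_request(struct page *page, struct nfs_page *req)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)req);
}

/* Detach it again when the request goes away. */
static void example_detach_request(struct page *page)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
}

/* Look it up; take a reference before the lock is dropped. */
static struct nfs_page *example_find_request(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		atomic_inc(&req->wb_count);
	}
	return req;
}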
*/ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, - struct inode *inode, struct page *page, - unsigned int offset, unsigned int bytes) + struct page *page, unsigned int offset, unsigned int bytes) { - struct nfs_server *server = NFS_SERVER(inode); + struct inode *inode = page->mapping->host; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_page *req, *new = NULL; unsigned long rqend, end; end = offset + bytes; - if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR)) + if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR)) return ERR_PTR(-ERESTARTSYS); for (;;) { /* Loop over all inode entries and see if we find * A request for the page we wish to update */ spin_lock(&nfsi->req_lock); - req = _nfs_find_request(inode, page->index); + req = nfs_page_find_request_locked(page); if (req) { if (!nfs_lock_request_dontget(req)) { int error; + spin_unlock(&nfsi->req_lock); error = nfs_wait_on_request(req); nfs_release_request(req); @@ -745,7 +674,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, return ERR_PTR(error); } spin_unlock(&nfsi->req_lock); - nfs_mark_request_dirty(new); return new; } spin_unlock(&nfsi->req_lock); @@ -786,9 +714,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, int nfs_flush_incompatible(struct file *file, struct page *page) { struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; - struct inode *inode = page->mapping->host; struct nfs_page *req; - int status = 0; + int do_flush, status; /* * Look for a request corresponding to this page. If there * is one, and it belongs to another file, we flush it out @@ -797,13 +724,18 @@ int nfs_flush_incompatible(struct file *file, struct page *page) * Also do the same if we find a request from an existing * dropped page. */ - req = nfs_find_request(inode, page->index); - if (req) { - if (req->wb_page != page || ctx != req->wb_context) - status = nfs_wb_page(inode, page); + do { + req = nfs_page_find_request(page); + if (req == NULL) + return 0; + do_flush = req->wb_page != page || req->wb_context != ctx + || !nfs_dirty_request(req); nfs_release_request(req); - } - return (status < 0) ? status : 0; + if (!do_flush) + return 0; + status = nfs_wb_page(page->mapping->host, page); + } while (status == 0); + return status; } /* @@ -817,7 +749,6 @@ int nfs_updatepage(struct file *file, struct page *page, { struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data; struct inode *inode = page->mapping->host; - struct nfs_page *req; int status = 0; nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); @@ -827,62 +758,18 @@ int nfs_updatepage(struct file *file, struct page *page, file->f_dentry->d_name.name, count, (long long)(page_offset(page) +offset)); - if (IS_SYNC(inode)) { - status = nfs_writepage_sync(ctx, inode, page, offset, count, 0); - if (status > 0) { - if (offset == 0 && status == PAGE_CACHE_SIZE) - SetPageUptodate(page); - return 0; - } - return status; - } - /* If we're not using byte range locks, and we know the page * is entirely in cache, it may be more efficient to avoid * fragmenting write requests. 
*/ if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { - loff_t end_offs = i_size_read(inode) - 1; - unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT; - - count += offset; + count = max(count + offset, nfs_page_length(page)); offset = 0; - if (unlikely(end_offs < 0)) { - /* Do nothing */ - } else if (page->index == end_index) { - unsigned int pglen; - pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1; - if (count < pglen) - count = pglen; - } else if (page->index < end_index) - count = PAGE_CACHE_SIZE; } - /* - * Try to find an NFS request corresponding to this page - * and update it. - * If the existing request cannot be updated, we must flush - * it out now. - */ - do { - req = nfs_update_request(ctx, inode, page, offset, count); - status = (IS_ERR(req)) ? PTR_ERR(req) : 0; - if (status != -EBUSY) - break; - /* Request could not be updated. Flush it out and try again */ - status = nfs_wb_page(inode, page); - } while (status >= 0); - if (status < 0) - goto done; - - status = 0; + status = nfs_writepage_setup(ctx, page, offset, count); + __set_page_dirty_nobuffers(page); - /* Update file length */ - nfs_grow_file(page, offset, count); - /* Set the PG_uptodate flag? */ - nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); - nfs_unlock_request(req); -done: dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n", status, (long long)i_size_read(inode)); if (status < 0) @@ -897,7 +784,7 @@ static void nfs_writepage_release(struct nfs_page *req) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) if (!PageError(req->wb_page)) { if (NFS_NEED_RESCHED(req)) { - nfs_mark_request_dirty(req); + nfs_redirty_request(req); goto out; } else if (NFS_NEED_COMMIT(req)) { nfs_mark_request_commit(req); @@ -979,9 +866,7 @@ static void nfs_execute_write(struct nfs_write_data *data) sigset_t oldset; rpc_clnt_sigmask(clnt, &oldset); - lock_kernel(); rpc_execute(&data->task); - unlock_kernel(); rpc_clnt_sigunmask(clnt, &oldset); } @@ -1015,7 +900,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) atomic_set(&req->wb_complete, requests); ClearPageError(page); - set_page_writeback(page); offset = 0; nbytes = req->wb_bytes; do { @@ -1043,9 +927,9 @@ out_bad: while (!list_empty(&list)) { data = list_entry(list.next, struct nfs_write_data, pages); list_del(&data->pages); - nfs_writedata_free(data); + nfs_writedata_release(data); } - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); return -ENOMEM; } @@ -1076,7 +960,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) nfs_list_remove_request(req); nfs_list_add_request(req, &data->pages); ClearPageError(req->wb_page); - set_page_writeback(req->wb_page); *pages++ = req->wb_page; count += req->wb_bytes; } @@ -1091,7 +974,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); nfs_list_remove_request(req); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); } return -ENOMEM; @@ -1126,7 +1009,7 @@ out_err: while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); nfs_clear_page_writeback(req); } return error; @@ -1442,7 +1325,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) } /* We have a mismatch. 
Write the page again */ dprintk(" mismatch\n"); - nfs_mark_request_dirty(req); + nfs_redirty_request(req); next: nfs_clear_page_writeback(req); } @@ -1459,18 +1342,17 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i } #endif -static int nfs_flush_inode(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how) +static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) { - struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_inode *nfsi = NFS_I(mapping->host); LIST_HEAD(head); - int res; + long res; spin_lock(&nfsi->req_lock); - res = nfs_scan_dirty(inode, &head, idx_start, npages); + res = nfs_scan_dirty(mapping, wbc, &head); spin_unlock(&nfsi->req_lock); if (res) { - int error = nfs_flush_list(inode, &head, res, how); + int error = nfs_flush_list(mapping->host, &head, res, how); if (error < 0) return error; } @@ -1496,38 +1378,62 @@ int nfs_commit_inode(struct inode *inode, int how) } #endif -int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, - unsigned int npages, int how) +long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) { + struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode); + unsigned long idx_start, idx_end; + unsigned int npages = 0; LIST_HEAD(head); int nocommit = how & FLUSH_NOCOMMIT; - int pages, ret; - + long pages, ret; + + /* FIXME */ + if (wbc->range_cyclic) + idx_start = 0; + else { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; + if (idx_end > idx_start) { + unsigned long l_npages = 1 + idx_end - idx_start; + npages = l_npages; + if (sizeof(npages) != sizeof(l_npages) && + (unsigned long)npages != l_npages) + npages = 0; + } + } how &= ~FLUSH_NOCOMMIT; spin_lock(&nfsi->req_lock); do { + wbc->pages_skipped = 0; ret = nfs_wait_on_requests_locked(inode, idx_start, npages); if (ret != 0) continue; - pages = nfs_scan_dirty(inode, &head, idx_start, npages); + pages = nfs_scan_dirty(mapping, wbc, &head); if (pages != 0) { spin_unlock(&nfsi->req_lock); - if (how & FLUSH_INVALIDATE) + if (how & FLUSH_INVALIDATE) { nfs_cancel_dirty_list(&head); - else + ret = pages; + } else ret = nfs_flush_list(inode, &head, pages, how); spin_lock(&nfsi->req_lock); continue; } + if (wbc->pages_skipped != 0) + continue; if (nocommit) break; pages = nfs_scan_commit(inode, &head, idx_start, npages); - if (pages == 0) + if (pages == 0) { + if (wbc->pages_skipped != 0) + continue; break; + } if (how & FLUSH_INVALIDATE) { spin_unlock(&nfsi->req_lock); nfs_cancel_commit_list(&head); + ret = pages; spin_lock(&nfsi->req_lock); continue; } @@ -1540,6 +1446,106 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start, return ret; } +/* + * flush the inode to disk. 
+ */ +int nfs_wb_all(struct inode *inode) +{ + struct address_space *mapping = inode->i_mapping; + struct writeback_control wbc = { + .bdi = mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .for_writepages = 1, + .range_cyclic = 1, + }; + int ret; + + ret = generic_writepages(mapping, &wbc); + if (ret < 0) + goto out; + ret = nfs_sync_mapping_wait(mapping, &wbc, 0); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return ret; +} + +int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how) +{ + struct writeback_control wbc = { + .bdi = mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = range_start, + .range_end = range_end, + .for_writepages = 1, + }; + int ret; + + if (!(how & FLUSH_NOWRITEPAGE)) { + ret = generic_writepages(mapping, &wbc); + if (ret < 0) + goto out; + } + ret = nfs_sync_mapping_wait(mapping, &wbc, how); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + return ret; +} + +int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) +{ + loff_t range_start = page_offset(page); + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + struct writeback_control wbc = { + .bdi = page->mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = range_start, + .range_end = range_end, + }; + int ret; + + BUG_ON(!PageLocked(page)); + if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { + ret = nfs_writepage_locked(page, &wbc); + if (ret < 0) + goto out; + } + ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); + if (ret >= 0) + return 0; +out: + __mark_inode_dirty(inode, I_DIRTY_PAGES); + return ret; +} + +/* + * Write back all requests on one page - we do this before reading it. 
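nfs_wb_all(), nfs_sync_mapping_range() and nfs_wb_page_priority() above all follow the same shape: build a WB_SYNC_ALL writeback_control covering the range of interest, push the dirty pages out, then wait via nfs_sync_mapping_wait(). A hedged sketch of the single-page variant of that idiom (illustrative helper name; PAGE_CACHE_SIZE is the page-cache granularity of this era):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Synchronously push one page of @mapping - the same writeback_control
 * setup nfs_wb_page_priority() uses above. */
static int example_sync_one_page(struct address_space *mapping,
				 struct page *page)
{
	struct writeback_control wbc = {
		.bdi		= mapping->backing_dev_info,
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= 1,
		.range_start	= page_offset(page),
		.range_end	= page_offset(page) + PAGE_CACHE_SIZE - 1,
	};

	return generic_writepages(mapping, &wbc);
}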
+ */ +int nfs_wb_page(struct inode *inode, struct page* page) +{ + return nfs_wb_page_priority(inode, page, FLUSH_STABLE); +} + +int nfs_set_page_dirty(struct page *page) +{ + struct nfs_page *req; + + req = nfs_page_find_request(page); + if (req != NULL) { + /* Mark any existing write requests for flushing */ + set_bit(PG_NEED_FLUSH, &req->wb_flags); + nfs_release_request(req); + } + return __set_page_dirty_nobuffers(page); +} + + int __init nfs_init_writepagecache(void) { nfs_wdata_cachep = kmem_cache_create("nfs_write_data", diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 64db601c2bd2..7f5bad0393b1 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -258,7 +258,7 @@ nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp, /* Now create the file and set attributes */ nfserr = nfsd_create_v3(rqstp, dirfhp, argp->name, argp->len, attr, newfhp, - argp->createmode, argp->verf, NULL); + argp->createmode, argp->verf, NULL, NULL); RETURN_STATUS(nfserr); } diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index b4baca3053c3..277df40f098d 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -24,10 +24,6 @@ #define NFSDDBG_FACILITY NFSDDBG_XDR -#ifdef NFSD_OPTIMIZE_SPACE -# define inline -#endif - /* * Mapping of S_IF* types to NFS file types @@ -42,14 +38,14 @@ static u32 nfs3_ftypes[] = { /* * XDR functions for basic NFS types */ -static inline __be32 * +static __be32 * encode_time3(__be32 *p, struct timespec *time) { *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); return p; } -static inline __be32 * +static __be32 * decode_time3(__be32 *p, struct timespec *time) { time->tv_sec = ntohl(*p++); @@ -57,7 +53,7 @@ decode_time3(__be32 *p, struct timespec *time) return p; } -static inline __be32 * +static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size; @@ -77,7 +73,7 @@ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp) return decode_fh(p, fhp); } -static inline __be32 * +static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { unsigned int size = fhp->fh_handle.fh_size; @@ -91,7 +87,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp) * Decode a file name and make sure that the path contains * no slashes or null bytes. 
*/ -static inline __be32 * +static __be32 * decode_filename(__be32 *p, char **namp, int *lenp) { char *name; @@ -107,7 +103,7 @@ decode_filename(__be32 *p, char **namp, int *lenp) return p; } -static inline __be32 * +static __be32 * decode_sattr3(__be32 *p, struct iattr *iap) { u32 tmp; @@ -153,7 +149,7 @@ decode_sattr3(__be32 *p, struct iattr *iap) return p; } -static inline __be32 * +static __be32 * encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { @@ -186,7 +182,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, return p; } -static inline __be32 * +static __be32 * encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct inode *inode = fhp->fh_dentry->d_inode; @@ -776,7 +772,7 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, return xdr_ressize_check(rqstp, p); } -static inline __be32 * +static __be32 * encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen, ino_t ino) { @@ -790,7 +786,7 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, return p; } -static inline __be32 * +static __be32 * encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, struct svc_fh *fhp) { diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0a7bbdc4a10a..50bc94243ca1 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -93,6 +93,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o { struct svc_fh resfh; __be32 status; + int created = 0; fh_init(&resfh, NFS4_FHSIZE); open->op_truncate = 0; @@ -105,28 +106,27 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o status = nfsd_create_v3(rqstp, current_fh, open->op_fname.data, open->op_fname.len, &open->op_iattr, &resfh, open->op_createmode, - (u32 *)open->op_verf.data, &open->op_truncate); - } - else { + (u32 *)open->op_verf.data, &open->op_truncate, &created); + } else { status = nfsd_lookup(rqstp, current_fh, open->op_fname.data, open->op_fname.len, &resfh); fh_unlock(current_fh); } + if (status) + goto out; - if (!status) { - set_change_info(&open->op_cinfo, current_fh); + set_change_info(&open->op_cinfo, current_fh); - /* set reply cache */ - fh_dup2(current_fh, &resfh); - open->op_stateowner->so_replay.rp_openfh_len = - resfh.fh_handle.fh_size; - memcpy(open->op_stateowner->so_replay.rp_openfh, - &resfh.fh_handle.fh_base, - resfh.fh_handle.fh_size); + /* set reply cache */ + fh_dup2(current_fh, &resfh); + open->op_stateowner->so_replay.rp_openfh_len = resfh.fh_handle.fh_size; + memcpy(open->op_stateowner->so_replay.rp_openfh, + &resfh.fh_handle.fh_base, resfh.fh_handle.fh_size); + if (!created) status = do_open_permission(rqstp, current_fh, open, MAY_NOP); - } +out: fh_put(&resfh); return status; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 293b6495829f..640c92b2a9f7 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -84,10 +84,10 @@ static void nfs4_set_recdir(char *recdir); */ static DEFINE_MUTEX(client_mutex); -static kmem_cache_t *stateowner_slab = NULL; -static kmem_cache_t *file_slab = NULL; -static kmem_cache_t *stateid_slab = NULL; -static kmem_cache_t *deleg_slab = NULL; +static struct kmem_cache *stateowner_slab = NULL; +static struct kmem_cache *file_slab = NULL; +static struct kmem_cache *stateid_slab = NULL; +static struct kmem_cache *deleg_slab = NULL; void nfs4_lock_state(void) @@ -1003,7 +1003,7 @@ alloc_init_file(struct inode *ino) } static void 
-nfsd4_free_slab(kmem_cache_t **slab) +nfsd4_free_slab(struct kmem_cache **slab) { if (*slab == NULL) return; @@ -1829,9 +1829,8 @@ out: } static struct workqueue_struct *laundry_wq; -static struct work_struct laundromat_work; -static void laundromat_main(void *); -static DECLARE_WORK(laundromat_work, laundromat_main, NULL); +static void laundromat_main(struct work_struct *); +static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); __be32 nfsd4_renew(clientid_t *clid) @@ -1940,7 +1939,7 @@ nfs4_laundromat(void) } void -laundromat_main(void *not_used) +laundromat_main(struct work_struct *not_used) { time_t t; diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 56ebb1443e0e..f5243f943996 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -18,11 +18,6 @@ #define NFSDDBG_FACILITY NFSDDBG_XDR - -#ifdef NFSD_OPTIMIZE_SPACE -# define inline -#endif - /* * Mapping of S_IF* types to NFS file types */ @@ -55,7 +50,7 @@ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp) return decode_fh(p, fhp); } -static inline __be32 * +static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); @@ -66,7 +61,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp) * Decode a file name and make sure that the path contains * no slashes or null bytes. */ -static inline __be32 * +static __be32 * decode_filename(__be32 *p, char **namp, int *lenp) { char *name; @@ -82,7 +77,7 @@ decode_filename(__be32 *p, char **namp, int *lenp) return p; } -static inline __be32 * +static __be32 * decode_pathname(__be32 *p, char **namp, int *lenp) { char *name; @@ -98,7 +93,7 @@ decode_pathname(__be32 *p, char **namp, int *lenp) return p; } -static inline __be32 * +static __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index f21e917bb8ed..bb4d926e4487 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1177,7 +1177,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, /* * Get the dir op function pointer. 
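The laundromat conversion above is the statically-declared side of the 2.6.20 workqueue API change: DECLARE_DELAYED_WORK() no longer carries a data pointer, and the handler receives the work_struct itself. A minimal sketch with illustrative names (the workqueue is assumed to have been created elsewhere, e.g. with create_singlethread_workqueue()):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;	/* created during init */

static void example_main(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_main);

static void example_main(struct work_struct *work)
{
	/* ... periodic housekeeping ... then re-arm for later */
	queue_delayed_work(example_wq, &example_work, 60 * HZ);
}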
*/ - err = nfserr_perm; + err = 0; switch (type) { case S_IFREG: host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL); @@ -1237,7 +1237,7 @@ __be32 nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, char *fname, int flen, struct iattr *iap, struct svc_fh *resfhp, int createmode, u32 *verifier, - int *truncp) + int *truncp, int *created) { struct dentry *dentry, *dchild = NULL; struct inode *dirp; @@ -1331,6 +1331,8 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL); if (host_err < 0) goto out_nfserr; + if (created) + *created = 1; if (EX_ISSYNC(fhp->fh_export)) { err = nfserrno(nfsd_sync_dir(dentry)); diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c index 046fde8170ea..65e640c61c8b 100644 --- a/fs/nls/nls_cp936.c +++ b/fs/nls/nls_cp936.c @@ -4421,6 +4421,73 @@ static wchar_t *page_charset2uni[256] = { c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL, }; +static unsigned char u2c_00[512] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, /* 0x98-0x9B */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */ + 0xA1, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */ + 0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */ + 0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */ + 0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */ + 0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */ + 0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */ + 0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */ + 0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */ + 0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */ +}; + static unsigned char u2c_01[512] = { 0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */ @@ -10825,7 +10892,7 @@ static unsigned char u2c_FF[512] = { }; static unsigned char *page_uni2charset[256] = { - NULL, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL, + u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -10936,11 +11003,34 @@ static int uni2char(const wchar_t uni, unsigned char *uni2charset; unsigned char cl = uni&0xFF; unsigned char ch = (uni>>8)&0xFF; - int n; + unsigned char out0,out1; if (boundlen <= 0) return -ENAMETOOLONG; + if (uni == 0x20ac) {/* Euro symbol.The only exception with a non-ascii unicode */ + out[0] = 0x80; + return 1; + } + + if (ch == 0) { /* handle the U00 plane*/ + /* if (cl == 0) return -EINVAL;*/ /*U0000 is legal in cp936*/ + out0 = u2c_00[cl*2]; + out1 = u2c_00[cl*2+1]; + if (out0 == 0x00 && out1 == 0x00) { + if (cl<0x80) { + out[0] = cl; + return 1; + } + return -EINVAL; + } else { + if (boundlen <= 1) + return -ENAMETOOLONG; + out[0] = out0; + out[1] = out1; + return 2; + } + } uni2charset = page_uni2charset[ch]; if (uni2charset) { @@ -10950,15 +11040,10 @@ static int uni2char(const wchar_t uni, out[1] = uni2charset[cl*2+1]; if (out[0] == 0x00 && out[1] == 0x00) return -EINVAL; - n = 2; - } else if (ch==0 && cl) { - out[0] = cl; - n = 1; + return 2; } else return -EINVAL; - - return n; } static int char2uni(const unsigned char *rawstring, int boundlen, @@ -10972,7 +11057,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen, return -ENAMETOOLONG; if (boundlen == 1) { - *uni = rawstring[0]; + if (rawstring[0]==0x80) { /* Euro symbol.The only exception with a 
non-ascii unicode */ + *uni = 0x20ac; + } else { + *uni = rawstring[0]; + } return 1; } @@ -10986,7 +11075,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen, return -EINVAL; n = 2; } else{ - *uni = ch; + if (ch==0x80) {/* Euro symbol.The only exception with a non-ascii unicode */ + *uni = 0x20ac; + } else { + *uni = ch; + } n = 1; } return n; diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 9f08e851cfb6..c577d8e1bd95 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -1272,7 +1272,7 @@ ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec) { ntfs_attr_search_ctx *ctx; - ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS); + ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS); if (ctx) ntfs_attr_init_search_ctx(ctx, ni, mrec); return ctx; diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c index e32cde486362..2194eff49743 100644 --- a/fs/ntfs/index.c +++ b/fs/ntfs/index.c @@ -38,7 +38,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni) { ntfs_index_context *ictx; - ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS); + ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS); if (ictx) *ictx = (ntfs_index_context){ .idx_ni = idx_ni }; return ictx; diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 2d3de9c89818..247989891b4b 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -324,7 +324,7 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb) ntfs_inode *ni; ntfs_debug("Entering."); - ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS); + ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS); if (likely(ni != NULL)) { ni->state = 0; return VFS_I(ni); @@ -349,7 +349,7 @@ static inline ntfs_inode *ntfs_alloc_extent_inode(void) ntfs_inode *ni; ntfs_debug("Entering."); - ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS); + ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS); if (likely(ni != NULL)) { ni->state = 0; return ni; diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c index 6a495f7369f9..005ca4b0f132 100644 --- a/fs/ntfs/unistr.c +++ b/fs/ntfs/unistr.c @@ -266,7 +266,7 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins, /* We do not trust outside sources. 
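The nls_cp936 hunks just above add a u2c_00 table for the U+00xx plane and special-case 0x80 <-> U+20AC (the euro sign), the one single-byte code outside plain ASCII in this codepage. A compact userspace illustration of the unicode-to-cp936 decision order (table contents elided; this is not the kernel uni2char() itself):

#include <stdint.h>

static int cp936_from_unicode(uint16_t uni, uint8_t out[2],
			      const uint8_t u2c_00[512])
{
	uint8_t hi = (uni >> 8) & 0xff;
	uint8_t lo = uni & 0xff;

	if (uni == 0x20ac) {		/* euro: lone non-ASCII single byte */
		out[0] = 0x80;
		return 1;
	}
	if (hi == 0) {			/* U+0000..U+00FF plane */
		uint8_t b0 = u2c_00[lo * 2];
		uint8_t b1 = u2c_00[lo * 2 + 1];

		if (b0 || b1) {		/* double-byte GB2312 mapping */
			out[0] = b0;
			out[1] = b1;
			return 2;
		}
		if (lo < 0x80) {	/* ASCII maps to itself */
			out[0] = lo;
			return 1;
		}
		return -1;		/* no mapping */
	}
	return -1;			/* other planes use the other tables */
}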
*/ if (likely(ins)) { - ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS); + ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS); if (likely(ucs)) { for (i = o = 0; i < ins_len; i += wc_len) { wc_len = nls->char2uni(ins + i, ins_len - i, diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index f43bc5f18a35..edc91ca3792a 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -52,14 +52,14 @@ static int ocfs2_extent_contig(struct inode *inode, u64 blkno); static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, int wanted, struct ocfs2_alloc_context *meta_ac, struct buffer_head *bhs[]); static int ocfs2_add_branch(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, struct buffer_head *eb_bh, @@ -67,14 +67,14 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, struct ocfs2_alloc_context *meta_ac); static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *meta_ac, struct buffer_head **ret_new_eb_bh); static int ocfs2_do_insert_extent(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 blkno, @@ -152,7 +152,7 @@ bail: * l_count for you */ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, int wanted, struct ocfs2_alloc_context *meta_ac, @@ -253,7 +253,7 @@ bail: * contain a single record with e_clusters == 0. */ static int ocfs2_add_branch(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, struct buffer_head *eb_bh, @@ -418,7 +418,7 @@ bail: * after this call. */ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *meta_ac, @@ -520,7 +520,7 @@ bail: * down. 
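In the ocfs2 hunks above (and throughout the rest of this patch) struct ocfs2_journal_handle gives way to a bare JBD handle_t: transactions are opened with ocfs2_start_trans(osb, credits), checked with IS_ERR(), and closed with ocfs2_commit_trans(osb, handle). A hedged sketch of the new calling convention, assuming ocfs2's internal headers and using only functions visible in this patch:

/* Sketch only - OCFS2_INODE_UPDATE_CREDITS and mlog_errno() come from
 * ocfs2's internal headers. */
static int example_small_transaction(struct ocfs2_super *osb)
{
	handle_t *handle;
	int status = 0;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	/* ... journal_access / modify buffers / journal_dirty ... */

	ocfs2_commit_trans(osb, handle);
out:
	return status;
}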
*/ static int ocfs2_do_insert_extent(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 start_blk, @@ -809,7 +809,7 @@ bail: /* the caller needs to update fe->i_clusters */ int ocfs2_insert_extent(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 start_blk, @@ -951,7 +951,7 @@ static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl, } static int ocfs2_truncate_log_append(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, u64 start_blk, unsigned int num_clusters) { @@ -1034,7 +1034,7 @@ bail: } static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *data_alloc_inode, struct buffer_head *data_alloc_bh) { @@ -1113,7 +1113,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) { int status; unsigned int num_to_flush; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct inode *data_alloc_inode = NULL; struct buffer_head *tl_bh = osb->osb_tl_bh; @@ -1130,7 +1130,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) if (!OCFS2_IS_VALID_DINODE(di)) { OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); status = -EIO; - goto bail; + goto out; } num_to_flush = le16_to_cpu(tl->tl_used); @@ -1138,14 +1138,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); if (!num_to_flush) { status = 0; - goto bail; - } - - handle = ocfs2_alloc_handle(osb); - if (!handle) { - status = -ENOMEM; - mlog_errno(status); - goto bail; + goto out; } data_alloc_inode = ocfs2_get_system_file_inode(osb, @@ -1154,41 +1147,40 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) if (!data_alloc_inode) { status = -EINVAL; mlog(ML_ERROR, "Could not get bitmap inode!\n"); - goto bail; + goto out; } - ocfs2_handle_add_inode(handle, data_alloc_inode); - status = ocfs2_meta_lock(data_alloc_inode, handle, &data_alloc_bh, 1); + mutex_lock(&data_alloc_inode->i_mutex); + + status = ocfs2_meta_lock(data_alloc_inode, &data_alloc_bh, 1); if (status < 0) { mlog_errno(status); - goto bail; + goto out_mutex; } - handle = ocfs2_start_trans(osb, handle, OCFS2_TRUNCATE_LOG_UPDATE); + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); - handle = NULL; mlog_errno(status); - goto bail; + goto out_unlock; } status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, data_alloc_bh); - if (status < 0) { + if (status < 0) mlog_errno(status); - goto bail; - } -bail: - if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); - if (data_alloc_inode) - iput(data_alloc_inode); +out_unlock: + brelse(data_alloc_bh); + ocfs2_meta_unlock(data_alloc_inode, 1); - if (data_alloc_bh) - brelse(data_alloc_bh); +out_mutex: + mutex_unlock(&data_alloc_inode->i_mutex); + iput(data_alloc_inode); +out: mlog_exit(status); return status; } @@ -1205,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb) return status; } -static void ocfs2_truncate_log_worker(void *data) +static void ocfs2_truncate_log_worker(struct work_struct *work) { int status; - struct ocfs2_super *osb = data; + struct ocfs2_super *osb = + container_of(work, struct ocfs2_super, + osb_truncate_log_wq.work); mlog_entry_void(); @@ -1347,7 
+1341,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, int i; unsigned int clusters, num_recs, start_cluster; u64 start_blk; - struct ocfs2_journal_handle *handle; + handle_t *handle; struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_truncate_log *tl; @@ -1373,8 +1367,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, } } - handle = ocfs2_start_trans(osb, NULL, - OCFS2_TRUNCATE_LOG_UPDATE); + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -1387,7 +1380,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, status = ocfs2_truncate_log_append(osb, handle, start_blk, clusters); - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); if (status < 0) { mlog_errno(status); goto bail_up; @@ -1441,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) /* ocfs2_truncate_log_shutdown keys on the existence of * osb->osb_tl_inode so we don't set any of the osb variables * until we're sure all is well. */ - INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); + INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, + ocfs2_truncate_log_worker); osb->osb_tl_bh = tl_bh; osb->osb_tl_inode = tl_inode; @@ -1543,7 +1537,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh, struct buffer_head *old_last_eb_bh, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_truncate_context *tc) { int status, i, depth; @@ -1782,7 +1776,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; struct buffer_head *last_eb_bh; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct inode *tl_inode = osb->osb_tl_inode; mlog_entry_void(); @@ -1868,7 +1862,7 @@ start: credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, fe, el); - handle = ocfs2_start_trans(osb, NULL, credits); + handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1891,7 +1885,7 @@ start: mutex_unlock(&tl_inode->i_mutex); tl_sem = 0; - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); handle = NULL; BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters); @@ -1906,7 +1900,7 @@ bail: mutex_unlock(&tl_inode->i_mutex); if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); if (last_eb_bh) brelse(last_eb_bh); @@ -2011,10 +2005,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb, mutex_lock(&ext_alloc_inode->i_mutex); (*tc)->tc_ext_alloc_inode = ext_alloc_inode; - status = ocfs2_meta_lock(ext_alloc_inode, - NULL, - &ext_alloc_bh, - 1); + status = ocfs2_meta_lock(ext_alloc_inode, &ext_alloc_bh, 1); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 12ba897743f4..0b82e8044325 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -28,7 +28,7 @@ struct ocfs2_alloc_context; int ocfs2_insert_extent(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 blkno, diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3d7c082a8f58..2f7268e81520 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -200,7 +200,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) mlog_entry("(0x%p, %lu)\n", file, (page ? 
page->index : 0)); - ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page); + ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; @@ -305,7 +305,7 @@ static int ocfs2_prepare_write(struct file *file, struct page *page, mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to); - ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page); + ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page); if (ret != 0) { mlog_errno(ret); goto out; @@ -355,16 +355,16 @@ static int walk_page_buffers( handle_t *handle, return ret; } -struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, +handle_t *ocfs2_start_walk_page_trans(struct inode *inode, struct page *page, unsigned from, unsigned to) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; int ret = 0; - handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (!handle) { ret = -ENOMEM; mlog_errno(ret); @@ -372,7 +372,7 @@ struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, } if (ocfs2_should_order_data(inode)) { - ret = walk_page_buffers(handle->k_handle, + ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL, ocfs2_journal_dirty_data); @@ -382,7 +382,7 @@ struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, out: if (ret) { if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); handle = ERR_PTR(ret); } return handle; @@ -394,7 +394,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page, int ret; struct buffer_head *di_bh = NULL; struct inode *inode = page->mapping->host; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_dinode *di; mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to); @@ -412,7 +412,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page, * stale inode allocation image (i_size, i_clusters, etc). */ - ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page); + ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page); if (ret != 0) { mlog_errno(ret); goto out; @@ -464,7 +464,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page, } out_commit: - ocfs2_commit_trans(handle); + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out_unlock_data: ocfs2_data_unlock(inode, 1); out_unlock_meta: @@ -490,7 +490,7 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) * accessed concurrently from multiple nodes. 
*/ if (!INODE_JOURNAL(inode)) { - err = ocfs2_meta_lock(inode, NULL, NULL, 0); + err = ocfs2_meta_lock(inode, NULL, 0); if (err) { if (err != -ENOENT) mlog_errno(err); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index e88c3f0b8fa9..f446a15eab88 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -25,7 +25,7 @@ int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, unsigned from, unsigned to); -struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, +handle_t *ocfs2_start_walk_page_trans(struct inode *inode, struct page *page, unsigned from, unsigned to); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 305cba3681fe..4cd9a9580456 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -141,7 +141,7 @@ struct o2hb_region { * recognizes a node going up and down in one iteration */ u64 hr_generation; - struct work_struct hr_write_timeout_work; + struct delayed_work hr_write_timeout_work; unsigned long hr_last_timeout_start; /* Used during o2hb_check_slot to hold a copy of the block @@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt { int wc_error; }; -static void o2hb_write_timeout(void *arg) +static void o2hb_write_timeout(struct work_struct *work) { - struct o2hb_region *reg = arg; + struct o2hb_region *reg = + container_of(work, struct o2hb_region, + hr_write_timeout_work.work); mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, @@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, goto out; } - INIT_WORK(®->hr_write_timeout_work, o2hb_write_timeout, reg); + INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); /* * A node is considered live after it has beat LIVE_THRESHOLD diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 7bba98fbfc15..4705d659fe57 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -88,7 +88,7 @@ void o2quo_disk_timeout(void) o2quo_fence_self(); } -static void o2quo_make_decision(void *arg) +static void o2quo_make_decision(struct work_struct *work) { int quorum; int lowest_hb, lowest_reachable = 0, fence = 0; @@ -306,7 +306,7 @@ void o2quo_init(void) struct o2quo_state *qs = &o2quo_state; spin_lock_init(&qs->qs_lock); - INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); + INIT_WORK(&qs->qs_work, o2quo_make_decision); } void o2quo_exit(void) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index b650efa8c8be..9b3209dc0b16 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] = [O2NET_ERR_DIED] = -EHOSTDOWN,}; /* can't quite avoid *all* internal declarations :/ */ -static void o2net_sc_connect_completed(void *arg); -static void o2net_rx_until_empty(void *arg); -static void o2net_shutdown_sc(void *arg); +static void o2net_sc_connect_completed(struct work_struct *work); +static void o2net_rx_until_empty(struct work_struct *work); +static void o2net_shutdown_sc(struct work_struct *work); static void o2net_listen_data_ready(struct sock *sk, int bytes); -static void o2net_sc_send_keep_req(void *arg); +static void o2net_sc_send_keep_req(struct work_struct *work); static void o2net_idle_timer(unsigned long data); static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); @@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) o2nm_node_get(node); sc->sc_node = node; - INIT_WORK(&sc->sc_connect_work, 
o2net_sc_connect_completed, sc); - INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); - INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); - INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); + INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); + INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); + INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); + INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); init_timer(&sc->sc_idle_timeout); sc->sc_idle_timeout.function = o2net_idle_timer; @@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc, sc_put(sc); } static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, - struct work_struct *work, + struct delayed_work *work, int delay) { sc_get(sc); @@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, sc_put(sc); } static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, - struct work_struct *work) + struct delayed_work *work) { if (cancel_delayed_work(work)) sc_put(sc); @@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn, * ourselves as state_change couldn't get the nn_lock and call set_nn_state * itself. */ -static void o2net_shutdown_sc(void *arg) +static void o2net_shutdown_sc(struct work_struct *work) { - struct o2net_sock_container *sc = arg; + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_shutdown_work); struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); sclog(sc, "shutting down\n"); @@ -1201,9 +1203,10 @@ out: /* this work func is triggerd by data ready. it reads until it can read no * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing * our work the work struct will be marked and we'll be called again. */ -static void o2net_rx_until_empty(void *arg) +static void o2net_rx_until_empty(struct work_struct *work) { - struct o2net_sock_container *sc = arg; + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, sc_rx_work); int ret; do { @@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock) /* called when a connect completes and after a sock is accepted. the * rx path will see the response and mark the sc valid */ -static void o2net_sc_connect_completed(void *arg) +static void o2net_sc_connect_completed(struct work_struct *work) { - struct o2net_sock_container *sc = arg; + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_connect_work); mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", (unsigned long long)O2NET_PROTOCOL_VERSION, @@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg) } /* this is called as a work_struct func. */ -static void o2net_sc_send_keep_req(void *arg) +static void o2net_sc_send_keep_req(struct work_struct *work) { - struct o2net_sock_container *sc = arg; + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_keepalive_work.work); o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); sc_put(sc); @@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) * having a connect attempt fail, etc. 
This centralizes the logic which decides * if a connect attempt should be made or if we should give up and all future * transmit attempts should fail */ -static void o2net_start_connect(void *arg) +static void o2net_start_connect(struct work_struct *work) { - struct o2net_node *nn = arg; + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_connect_work.work); struct o2net_sock_container *sc = NULL; struct o2nm_node *node = NULL, *mynode = NULL; struct socket *sock = NULL; struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; - int ret = 0; + int ret = 0, stop; /* if we're greater we initiate tx, otherwise we accept */ if (o2nm_this_node() <= o2net_num_from_nn(nn)) @@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg) spin_lock(&nn->nn_lock); /* see if we already have one pending or have given up */ - if (nn->nn_sc || nn->nn_persistent_error) - arg = NULL; + stop = (nn->nn_sc || nn->nn_persistent_error); spin_unlock(&nn->nn_lock); - if (arg == NULL) /* *shrug*, needed some indicator */ + if (stop) goto out; nn->nn_last_connect_attempt = jiffies; @@ -1421,9 +1428,10 @@ out: return; } -static void o2net_connect_expired(void *arg) +static void o2net_connect_expired(struct work_struct *work) { - struct o2net_node *nn = arg; + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_connect_expired.work); spin_lock(&nn->nn_lock); if (!nn->nn_sc_valid) { @@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg) spin_unlock(&nn->nn_lock); } -static void o2net_still_up(void *arg) +static void o2net_still_up(struct work_struct *work) { - struct o2net_node *nn = arg; + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_still_up.work); o2quo_hb_still_up(o2net_num_from_nn(nn)); } @@ -1644,9 +1653,9 @@ out: return ret; } -static void o2net_accept_many(void *arg) +static void o2net_accept_many(struct work_struct *work) { - struct socket *sock = arg; + struct socket *sock = o2net_listen_sock; while (o2net_accept_one(sock) == 0) cond_resched(); } @@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port) write_unlock_bh(&sock->sk->sk_callback_lock); o2net_listen_sock = sock; - INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); + INIT_WORK(&o2net_listen_work, o2net_accept_many); sock->sk->sk_reuse = 1; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); @@ -1819,9 +1828,10 @@ int o2net_init(void) struct o2net_node *nn = o2net_nn_from_num(i); spin_lock_init(&nn->nn_lock); - INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); - INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); - INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); + INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); + INIT_DELAYED_WORK(&nn->nn_connect_expired, + o2net_connect_expired); + INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); /* until we see hb from a node we'll return einval */ nn->nn_persistent_error = -ENOTCONN; init_waitqueue_head(&nn->nn_sc_wq); diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 4b46aac7d243..daebbd3a2c8c 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -86,18 +86,18 @@ struct o2net_node { * connect attempt fails and so can be self-arming. shutdown is * careful to first mark the nn such that no connects will be attempted * before canceling delayed connect work and flushing the queue. 
*/ - struct work_struct nn_connect_work; + struct delayed_work nn_connect_work; unsigned long nn_last_connect_attempt; /* this is queued as nodes come up and is canceled when a connection is * established. this expiring gives up on the node and errors out * transmits */ - struct work_struct nn_connect_expired; + struct delayed_work nn_connect_expired; /* after we give up on a socket we wait a while before deciding * that it is still heartbeating and that we should do some * quorum work */ - struct work_struct nn_still_up; + struct delayed_work nn_still_up; }; struct o2net_sock_container { @@ -129,7 +129,7 @@ struct o2net_sock_container { struct work_struct sc_shutdown_work; struct timer_list sc_idle_timeout; - struct work_struct sc_keepalive_work; + struct delayed_work sc_keepalive_work; unsigned sc_handshake_ok:1; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 04e01915b86e..baad2aa27c14 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -82,6 +82,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) struct inode *inode = filp->f_dentry->d_inode; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; + int lock_level = 0; mlog_entry("dirino=%llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -89,7 +90,15 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) stored = 0; bh = NULL; - error = ocfs2_meta_lock(inode, NULL, NULL, 0); + error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level); + if (lock_level && error >= 0) { + /* We release EX lock which used to update atime + * and get PR lock again to reduce contention + * on commonly accessed directories. */ + ocfs2_meta_unlock(inode, 1); + lock_level = 0; + error = ocfs2_meta_lock(inode, NULL, 0); + } if (error < 0) { if (error != -ENOENT) mlog_errno(error); @@ -198,7 +207,7 @@ revalidate: stored = 0; bail: - ocfs2_meta_unlock(inode, 0); + ocfs2_meta_unlock(inode, lock_level); bail_nolock: mlog_exit(stored); @@ -340,7 +349,7 @@ int ocfs2_empty_dir(struct inode *inode) /* returns a bh of the 1st new block in the allocation. */ int ocfs2_do_extend_dir(struct super_block *sb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, struct ocfs2_alloc_context *data_ac, @@ -398,7 +407,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct buffer_head *new_bh = NULL; struct ocfs2_dir_entry * de; struct super_block *sb = osb->sb; @@ -409,13 +418,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, mlog(0, "extending dir %llu (i_size = %lld)\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - /* dir->i_size is always block aligned. 
*/ spin_lock(&OCFS2_I(dir)->ip_lock); if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) { @@ -428,8 +430,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, } if (!num_free_extents) { - status = ocfs2_reserve_new_metadata(osb, handle, - fe, &meta_ac); + status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -437,7 +438,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, } } - status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac); + status = ocfs2_reserve_clusters(osb, 1, &data_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -450,7 +451,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS; } - handle = ocfs2_start_trans(osb, handle, credits); + handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -496,7 +497,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, get_bh(*new_de_bh); bail: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); if (data_ac) ocfs2_free_alloc_context(data_ac); diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h index 5f614ec9649c..3f67e146864a 100644 --- a/fs/ocfs2/dir.h +++ b/fs/ocfs2/dir.h @@ -45,7 +45,7 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, struct buffer_head **ret_de_bh); struct ocfs2_alloc_context; int ocfs2_do_extend_dir(struct super_block *sb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, struct ocfs2_alloc_context *data_ac, diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index fa968180b072..6b6ff76538c5 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned * called functions that cannot be directly called from the * net message handlers for some reason, usually because * they need to send net messages of their own. 
*/ -void dlm_dispatch_work(void *data); +void dlm_dispatch_work(struct work_struct *work); struct dlm_lock_resource; struct dlm_work_item; diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 8d1065f8b3bd..420a375a3949 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -68,7 +68,8 @@ static void **dlm_alloc_pagevec(int pages) goto out_free; mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n", - pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE); + pages, (unsigned long)DLM_HASH_PAGES, + (unsigned long)DLM_BUCKETS_PER_PAGE); return vec; out_free: dlm_free_pagevec(vec, i); @@ -1296,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, spin_lock_init(&dlm->work_lock); INIT_LIST_HEAD(&dlm->work_list); - INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); + INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); kref_init(&dlm->dlm_refs); dlm->dlm_state = DLM_CTXT_NEW; diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 16b8d1ba7066..941acf14e61f 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -66,7 +66,7 @@ static struct file_operations dlmfs_file_operations; static struct inode_operations dlmfs_dir_inode_operations; static struct inode_operations dlmfs_root_inode_operations; static struct inode_operations dlmfs_file_inode_operations; -static kmem_cache_t *dlmfs_inode_cache; +static struct kmem_cache *dlmfs_inode_cache; struct workqueue_struct *user_dlm_worker; @@ -257,7 +257,7 @@ static ssize_t dlmfs_file_write(struct file *filp, } static void dlmfs_init_once(void *foo, - kmem_cache_t *cachep, + struct kmem_cache *cachep, unsigned long flags) { struct dlmfs_inode_private *ip = @@ -276,7 +276,7 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb) { struct dlmfs_inode_private *ip; - ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS); + ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS); if (!ip) return NULL; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index f784177b6241..856012b4fa49 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dlm_dump_all_mles); #endif /* 0 */ -static kmem_cache_t *dlm_mle_cache = NULL; +static struct kmem_cache *dlm_mle_cache = NULL; static void dlm_mle_release(struct kref *kref); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 9d950d7cea38..fb3e2b0817f1 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) } /* Worker function used during recovery. 
*/ -void dlm_dispatch_work(void *data) +void dlm_dispatch_work(struct work_struct *work) { - struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; + struct dlm_ctxt *dlm = + container_of(work, struct dlm_ctxt, dispatched_work); LIST_HEAD(tmp_list); struct list_head *iter, *iter2; struct dlm_work_item *item; diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c index eead48bbfac6..7d2f578b267d 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlm/userdlm.c @@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) BUG(); } -static void user_dlm_unblock_lock(void *opaque); +static void user_dlm_unblock_lock(struct work_struct *work); static void __user_dlm_queue_lockres(struct user_lock_res *lockres) { if (!(lockres->l_flags & USER_LOCK_QUEUED)) { user_dlm_grab_inode_ref(lockres); - INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, - lockres); + INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); queue_work(user_dlm_worker, &lockres->l_work); lockres->l_flags |= USER_LOCK_QUEUED; @@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) iput(inode); } -static void user_dlm_unblock_lock(void *opaque) +static void user_dlm_unblock_lock(struct work_struct *work) { int new_level, status; - struct user_lock_res *lockres = (struct user_lock_res *) opaque; + struct user_lock_res *lockres = + container_of(work, struct user_lock_res, l_work); struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); mlog(0, "processing lockres %.*s\n", lockres->l_namelen, diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8801e41afe80..69fba16efbd1 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -49,6 +49,7 @@ #include "dcache.h" #include "dlmglue.h" #include "extent_map.h" +#include "file.h" #include "heartbeat.h" #include "inode.h" #include "journal.h" @@ -1063,10 +1064,10 @@ static void ocfs2_cluster_unlock(struct ocfs2_super *osb, mlog_exit_void(); } -int ocfs2_create_new_lock(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, - int ex, - int local) +static int ocfs2_create_new_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int ex, + int local) { int level = ex ? LKM_EXMODE : LKM_PRMODE; unsigned long flags; @@ -1579,7 +1580,6 @@ static int ocfs2_assign_bh(struct inode *inode, * the result of the lock will be communicated via the callback. */ int ocfs2_meta_lock_full(struct inode *inode, - struct ocfs2_journal_handle *handle, struct buffer_head **ret_bh, int ex, int arg_flags) @@ -1668,12 +1668,6 @@ int ocfs2_meta_lock_full(struct inode *inode, } } - if (handle) { - status = ocfs2_handle_add_lock(handle, inode); - if (status < 0) - mlog_errno(status); - } - bail: if (status < 0) { if (ret_bh && (*ret_bh)) { @@ -1713,18 +1707,16 @@ bail: * the lock inversion simply. 
*/ int ocfs2_meta_lock_with_page(struct inode *inode, - struct ocfs2_journal_handle *handle, struct buffer_head **ret_bh, int ex, struct page *page) { int ret; - ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex, - OCFS2_LOCK_NONBLOCK); + ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { unlock_page(page); - if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0) + if (ocfs2_meta_lock(inode, ret_bh, ex) == 0) ocfs2_meta_unlock(inode, ex); ret = AOP_TRUNCATED_PAGE; } @@ -1732,6 +1724,44 @@ int ocfs2_meta_lock_with_page(struct inode *inode, return ret; } +int ocfs2_meta_lock_atime(struct inode *inode, + struct vfsmount *vfsmnt, + int *level) +{ + int ret; + + mlog_entry_void(); + ret = ocfs2_meta_lock(inode, NULL, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + /* + * If we should update atime, we will get EX lock, + * otherwise we just get PR lock. + */ + if (ocfs2_should_update_atime(inode, vfsmnt)) { + struct buffer_head *bh = NULL; + + ocfs2_meta_unlock(inode, 0); + ret = ocfs2_meta_lock(inode, &bh, 1); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + *level = 1; + if (ocfs2_should_update_atime(inode, vfsmnt)) + ocfs2_update_inode_atime(inode, bh); + if (bh) + brelse(bh); + } else + *level = 0; + + mlog_exit(ret); + return ret; +} + void ocfs2_meta_unlock(struct inode *inode, int ex) { diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 4a2769387229..c343fca68cf1 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -68,8 +68,6 @@ void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, u64 parent, struct inode *inode); void ocfs2_lock_res_free(struct ocfs2_lock_res *res); int ocfs2_create_new_inode_locks(struct inode *inode); -int ocfs2_create_new_lock(struct ocfs2_super *osb, - struct ocfs2_lock_res *lockres, int ex, int local); int ocfs2_drop_inode_locks(struct inode *inode); int ocfs2_data_lock_full(struct inode *inode, int write, @@ -82,19 +80,20 @@ void ocfs2_data_unlock(struct inode *inode, int write); int ocfs2_rw_lock(struct inode *inode, int write); void ocfs2_rw_unlock(struct inode *inode, int write); +int ocfs2_meta_lock_atime(struct inode *inode, + struct vfsmount *vfsmnt, + int *level); int ocfs2_meta_lock_full(struct inode *inode, - struct ocfs2_journal_handle *handle, struct buffer_head **ret_bh, int ex, int arg_flags); int ocfs2_meta_lock_with_page(struct inode *inode, - struct ocfs2_journal_handle *handle, struct buffer_head **ret_bh, int ex, struct page *page); /* 99% of the time we don't want to supply any additional flags -- * those are for very specific cases only. 
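
The new ocfs2_meta_lock_atime() above first takes the meta lock at PR (shared) level and only drops it and retakes EX when an atime update looks necessary; note that it re-runs ocfs2_should_update_atime() after the upgrade, since the state can change in the window where no lock is held, and the ocfs2_readdir hunk earlier in this diff then drops back to a PR lock to limit contention. Below is a hedged, runnable sketch of that "check under a shared lock, upgrade, re-check" idiom; a pthreads rwlock stands in for the cluster lock and all names are illustrative.

/* Upgrade-and-recheck idiom, sketched in userspace. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static long last_update;		/* protected by "lock" */

static int needs_update(long now, long quantum)
{
	return now - last_update > quantum;
}

static void maybe_update(long now, long quantum)
{
	pthread_rwlock_rdlock(&lock);		/* PR: cheap, shared */
	if (!needs_update(now, quantum)) {
		pthread_rwlock_unlock(&lock);
		return;
	}
	pthread_rwlock_unlock(&lock);		/* drop, then upgrade */

	pthread_rwlock_wrlock(&lock);		/* EX: exclusive */
	if (needs_update(now, quantum))		/* re-check after the gap */
		last_update = now;
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	maybe_update(100, 60);
	maybe_update(130, 60);			/* within the quantum: skipped */
	printf("last_update = %ld\n", last_update);
	return 0;
}
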
*/ -#define ocfs2_meta_lock(i, h, b, e) ocfs2_meta_lock_full(i, h, b, e, 0) +#define ocfs2_meta_lock(i, b, e) ocfs2_meta_lock_full(i, b, e, 0) void ocfs2_meta_unlock(struct inode *inode, int ex); int ocfs2_super_lock(struct ocfs2_super *osb, diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index fb91089a60a7..06be6e774cf9 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -100,7 +100,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) mlog(0, "find parent of directory %llu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno); - status = ocfs2_meta_lock(dir, NULL, NULL, 0); + status = ocfs2_meta_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index fcd4475d1f89..80ac69f11d9f 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -61,7 +61,7 @@ struct ocfs2_em_insert_context { struct ocfs2_extent_map_entry *right_ent; }; -static kmem_cache_t *ocfs2_em_ent_cachep = NULL; +static struct kmem_cache *ocfs2_em_ent_cachep = NULL; static struct ocfs2_extent_map_entry * diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 1be74c4e7814..8786b3c490aa 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -31,6 +31,8 @@ #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/sched.h> +#include <linux/pipe_fs_i.h> +#include <linux/mount.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> @@ -134,7 +136,58 @@ bail: return (err < 0) ? -EIO : 0; } -int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle, +int ocfs2_should_update_atime(struct inode *inode, + struct vfsmount *vfsmnt) +{ + struct timespec now; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return 0; + + if ((inode->i_flags & S_NOATIME) || + ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) + return 0; + + if ((vfsmnt->mnt_flags & MNT_NOATIME) || + ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) + return 0; + + now = CURRENT_TIME; + if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)) + return 0; + else + return 1; +} + +int ocfs2_update_inode_atime(struct inode *inode, + struct buffer_head *bh) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle; + + mlog_entry_void(); + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (handle == NULL) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + inode->i_atime = CURRENT_TIME; + ret = ocfs2_mark_inode_dirty(handle, inode, bh); + if (ret < 0) + mlog_errno(ret); + + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + mlog_exit(ret); + return ret; +} + +int ocfs2_set_inode_size(handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 new_i_size) @@ -163,10 +216,9 @@ static int ocfs2_simple_size_update(struct inode *inode, { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; - handle = ocfs2_start_trans(osb, NULL, - OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (handle == NULL) { ret = -ENOMEM; mlog_errno(ret); @@ -178,7 +230,7 @@ static int ocfs2_simple_size_update(struct inode *inode, if (ret < 0) mlog_errno(ret); - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); out: return ret; } @@ -189,14 +241,14 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, u64 new_i_size) { int status; - struct 
ocfs2_journal_handle *handle; + handle_t *handle; mlog_entry_void(); /* TODO: This needs to actually orphan the inode in this * transaction. */ - handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -207,7 +259,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, if (status < 0) mlog_errno(status); - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); out: mlog_exit(status); return status; @@ -328,7 +380,7 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb, struct inode *inode, u32 clusters_to_add, struct buffer_head *fe_bh, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret) @@ -433,7 +485,7 @@ static int ocfs2_extend_allocation(struct inode *inode, u32 prev_clusters; struct buffer_head *bh = NULL; struct ocfs2_dinode *fe = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; enum ocfs2_alloc_restarted why; @@ -463,13 +515,6 @@ restart_all: (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode), fe->i_clusters, clusters_to_add); - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto leave; - } - num_free_extents = ocfs2_num_free_extents(osb, inode, fe); @@ -480,10 +525,7 @@ restart_all: } if (!num_free_extents) { - status = ocfs2_reserve_new_metadata(osb, - handle, - fe, - &meta_ac); + status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -491,10 +533,7 @@ restart_all: } } - status = ocfs2_reserve_clusters(osb, - handle, - clusters_to_add, - &data_ac); + status = ocfs2_reserve_clusters(osb, clusters_to_add, &data_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -509,7 +548,7 @@ restart_all: drop_alloc_sem = 1; credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add); - handle = ocfs2_start_trans(osb, handle, credits); + handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -589,7 +628,7 @@ leave: drop_alloc_sem = 0; } if (handle) { - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); handle = NULL; } if (data_ac) { @@ -624,7 +663,7 @@ static int ocfs2_write_zero_page(struct inode *inode, struct page *page; unsigned long index; unsigned int offset; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ @@ -668,7 +707,7 @@ static int ocfs2_write_zero_page(struct inode *inode, ret = 0; if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out_unlock: unlock_page(page); page_cache_release(page); @@ -789,7 +828,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; mlog_entry("(0x%p, '%.*s')\n", dentry, dentry->d_name.len, dentry->d_name.name); @@ -825,7 +864,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } } - status = ocfs2_meta_lock(inode, NULL, &bh, 1); + status = ocfs2_meta_lock(inode, &bh, 1); if (status < 0) { if (status != 
-ENOENT) mlog_errno(status); @@ -845,7 +884,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } } - handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -863,7 +902,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) mlog_errno(status); bail_commit: - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); bail_unlock: ocfs2_meta_unlock(inode, 1); bail_unlock_rw: @@ -906,19 +945,41 @@ bail: return err; } +int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd) +{ + int ret; + + mlog_entry_void(); + + ret = ocfs2_meta_lock(inode, NULL, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = generic_permission(inode, mask, NULL); + if (ret) + mlog_errno(ret); + + ocfs2_meta_unlock(inode, 0); +out: + mlog_exit(ret); + return ret; +} + static int ocfs2_write_remove_suid(struct inode *inode) { int ret; struct buffer_head *bh = NULL; struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_journal_handle *handle; + handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di; mlog_entry("(Inode %llu, mode 0%o)\n", (unsigned long long)oi->ip_blkno, inode->i_mode); - handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (handle == NULL) { ret = -ENOMEM; mlog_errno(ret); @@ -951,75 +1012,29 @@ static int ocfs2_write_remove_suid(struct inode *inode) out_bh: brelse(bh); out_trans: - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); out: mlog_exit(ret); return ret; } -static inline int ocfs2_write_should_remove_suid(struct inode *inode) -{ - mode_t mode = inode->i_mode; - - if (!capable(CAP_FSETID)) { - if (unlikely(mode & S_ISUID)) - return 1; - - if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) - return 1; - } - return 0; -} - -static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, - loff_t pos) +static int ocfs2_prepare_inode_for_write(struct dentry *dentry, + loff_t *ppos, + size_t count, + int appending) { - int ret, rw_level = -1, meta_level = -1, have_alloc_sem = 0; + int ret = 0, meta_level = appending; + struct inode *inode = dentry->d_inode; u32 clusters; - struct file *filp = iocb->ki_filp; - struct inode *inode = filp->f_dentry->d_inode; loff_t newsize, saved_pos; - mlog_entry("(0x%p, %u, '%.*s')\n", filp, - (unsigned int)nr_segs, - filp->f_dentry->d_name.len, - filp->f_dentry->d_name.name); - - /* happy write of zero bytes */ - if (iocb->ki_left == 0) - return 0; - - if (!inode) { - mlog(0, "bad inode\n"); - return -EIO; - } - - mutex_lock(&inode->i_mutex); - /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ - if (filp->f_flags & O_DIRECT) { - have_alloc_sem = 1; - down_read(&inode->i_alloc_sem); - } - - /* concurrent O_DIRECT writes are allowed */ - rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1; - ret = ocfs2_rw_lock(inode, rw_level); - if (ret < 0) { - rw_level = -1; - mlog_errno(ret); - goto out; - } - /* * We sample i_size under a read level meta lock to see if our write * is extending the file, if it is we back off and get a write level * meta lock. */ - meta_level = (filp->f_flags & O_APPEND) ? 
1 : 0; for(;;) { - ret = ocfs2_meta_lock(inode, NULL, NULL, meta_level); + ret = ocfs2_meta_lock(inode, NULL, meta_level); if (ret < 0) { meta_level = -1; mlog_errno(ret); @@ -1035,7 +1050,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, * inode. There's also the dinode i_size state which * can be lost via setattr during extending writes (we * set inode->i_size at the end of a write. */ - if (ocfs2_write_should_remove_suid(inode)) { + if (should_remove_suid(dentry)) { if (meta_level == 0) { ocfs2_meta_unlock(inode, meta_level); meta_level = 1; @@ -1045,19 +1060,19 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, ret = ocfs2_write_remove_suid(inode); if (ret < 0) { mlog_errno(ret); - goto out; + goto out_unlock; } } /* work on a copy of ppos until we're sure that we won't have * to recalculate it due to relocking. */ - if (filp->f_flags & O_APPEND) { + if (appending) { saved_pos = i_size_read(inode); mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos); } else { - saved_pos = iocb->ki_pos; + saved_pos = *ppos; } - newsize = iocb->ki_left + saved_pos; + newsize = count + saved_pos; mlog(0, "pos=%lld newsize=%lld cursize=%lld\n", (long long) saved_pos, (long long) newsize, @@ -1090,19 +1105,66 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, if (!clusters) break; - ret = ocfs2_extend_file(inode, NULL, newsize, iocb->ki_left); + ret = ocfs2_extend_file(inode, NULL, newsize, count); if (ret < 0) { if (ret != -ENOSPC) mlog_errno(ret); - goto out; + goto out_unlock; } break; } - /* ok, we're done with i_size and alloc work */ - iocb->ki_pos = saved_pos; + if (appending) + *ppos = saved_pos; + +out_unlock: ocfs2_meta_unlock(inode, meta_level); - meta_level = -1; + +out: + return ret; +} + +static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, + loff_t pos) +{ + int ret, rw_level, have_alloc_sem = 0; + struct file *filp = iocb->ki_filp; + struct inode *inode = filp->f_dentry->d_inode; + int appending = filp->f_flags & O_APPEND ? 1 : 0; + + mlog_entry("(0x%p, %u, '%.*s')\n", filp, + (unsigned int)nr_segs, + filp->f_dentry->d_name.len, + filp->f_dentry->d_name.name); + + /* happy write of zero bytes */ + if (iocb->ki_left == 0) + return 0; + + mutex_lock(&inode->i_mutex); + /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ + if (filp->f_flags & O_DIRECT) { + have_alloc_sem = 1; + down_read(&inode->i_alloc_sem); + } + + /* concurrent O_DIRECT writes are allowed */ + rw_level = (filp->f_flags & O_DIRECT) ? 
0 : 1; + ret = ocfs2_rw_lock(inode, rw_level); + if (ret < 0) { + rw_level = -1; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_prepare_inode_for_write(filp->f_dentry, &iocb->ki_pos, + iocb->ki_left, appending); + if (ret < 0) { + mlog_errno(ret); + goto out; + } /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_rw_locked(iocb); @@ -1128,8 +1190,6 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, } out: - if (meta_level != -1) - ocfs2_meta_unlock(inode, meta_level); if (have_alloc_sem) up_read(&inode->i_alloc_sem); if (rw_level != -1) @@ -1140,12 +1200,83 @@ out: return ret; } +static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + struct file *out, + loff_t *ppos, + size_t len, + unsigned int flags) +{ + int ret; + struct inode *inode = out->f_dentry->d_inode; + + mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, + (unsigned int)len, + out->f_dentry->d_name.len, + out->f_dentry->d_name.name); + + inode_double_lock(inode, pipe->inode); + + ret = ocfs2_rw_lock(inode, 1); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_prepare_inode_for_write(out->f_dentry, ppos, len, 0); + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + + /* ok, we're done with i_size and alloc work */ + ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags); + +out_unlock: + ocfs2_rw_unlock(inode, 1); +out: + inode_double_unlock(inode, pipe->inode); + + mlog_exit(ret); + return ret; +} + +static ssize_t ocfs2_file_splice_read(struct file *in, + loff_t *ppos, + struct pipe_inode_info *pipe, + size_t len, + unsigned int flags) +{ + int ret = 0; + struct inode *inode = in->f_dentry->d_inode; + + mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, + (unsigned int)len, + in->f_dentry->d_name.len, + in->f_dentry->d_name.name); + + /* + * See the comment in ocfs2_file_aio_read() + */ + ret = ocfs2_meta_lock(inode, NULL, 0); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + ocfs2_meta_unlock(inode, 0); + + ret = generic_file_splice_read(in, ppos, pipe, len, flags); + +bail: + mlog_exit(ret); + return ret; +} + static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { - int ret = 0, rw_level = -1, have_alloc_sem = 0; + int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0; struct file *filp = iocb->ki_filp; struct inode *inode = filp->f_dentry->d_inode; @@ -1187,12 +1318,12 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * like i_size. This allows the checks down below * generic_file_aio_read() a chance of actually working. 
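
The new splice_write path above holds both the file's and the pipe's inode mutexes via inode_double_lock() before taking the rw lock and reusing ocfs2_prepare_inode_for_write(). The point of such a helper is to take the two locks in one agreed global order so concurrent callers cannot deadlock; address order is one common choice and is what the illustrative sketch below uses. Nothing in the sketch is kernel code.

/* Lock-ordering sketch: acquire a pair of locks in a fixed order,
 * regardless of the order the caller names them in. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* impose one global order */
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&m1, &m2);
	unlock_pair(&m1, &m2);

	lock_pair(&m2, &m1);	/* opposite argument order, same lock order taken */
	unlock_pair(&m2, &m1);

	puts("both argument orders acquired the pair consistently");
	return 0;
}
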
*/ - ret = ocfs2_meta_lock(inode, NULL, NULL, 0); + ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { mlog_errno(ret); goto bail; } - ocfs2_meta_unlock(inode, 0); + ocfs2_meta_unlock(inode, lock_level); ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); if (ret == -EINVAL) @@ -1220,11 +1351,13 @@ bail: struct inode_operations ocfs2_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, + .permission = ocfs2_permission, }; struct inode_operations ocfs2_special_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, + .permission = ocfs2_permission, }; const struct file_operations ocfs2_fops = { @@ -1238,6 +1371,8 @@ const struct file_operations ocfs2_fops = { .aio_read = ocfs2_file_aio_read, .aio_write = ocfs2_file_aio_write, .ioctl = ocfs2_ioctl, + .splice_read = ocfs2_file_splice_read, + .splice_write = ocfs2_file_splice_write, }; const struct file_operations ocfs2_dops = { diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 740c9e7ca599..601a453f18a8 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -41,17 +41,24 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb, struct inode *inode, u32 clusters_to_add, struct buffer_head *fe_bh, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason); int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); +int ocfs2_permission(struct inode *inode, int mask, + struct nameidata *nd); -int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle, +int ocfs2_set_inode_size(handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 new_i_size); +int ocfs2_should_update_atime(struct inode *inode, + struct vfsmount *vfsmnt); +int ocfs2_update_inode_atime(struct inode *inode, + struct buffer_head *bh); + #endif /* OCFS2_FILE_H */ diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 16e8e74dc966..42e361f3054f 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -360,7 +360,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, inode); ocfs2_set_inode_flags(inode); - inode->i_flags |= S_NOATIME; status = 0; bail: @@ -441,7 +440,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, generation, inode); if (can_lock) { - status = ocfs2_meta_lock(inode, NULL, NULL, 0); + status = ocfs2_meta_lock(inode, NULL, 0); if (status) { make_bad_inode(inode); mlog_errno(status); @@ -512,7 +511,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, struct buffer_head *fe_bh) { int status = 0; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_truncate_context *tc = NULL; struct ocfs2_dinode *fe; @@ -524,7 +523,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, if (!fe->i_clusters) goto bail; - handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -538,7 +537,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, goto bail; } - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); handle = NULL; status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc); @@ -554,7 +553,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, } bail: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); mlog_exit(status); 
return status; @@ -568,7 +567,7 @@ static int ocfs2_remove_inode(struct inode *inode, int status; struct inode *inode_alloc_inode = NULL; struct buffer_head *inode_alloc_bh = NULL; - struct ocfs2_journal_handle *handle; + handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; @@ -582,7 +581,7 @@ static int ocfs2_remove_inode(struct inode *inode, } mutex_lock(&inode_alloc_inode->i_mutex); - status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1); + status = ocfs2_meta_lock(inode_alloc_inode, &inode_alloc_bh, 1); if (status < 0) { mutex_unlock(&inode_alloc_inode->i_mutex); @@ -590,7 +589,7 @@ static int ocfs2_remove_inode(struct inode *inode, goto bail; } - handle = ocfs2_start_trans(osb, NULL, OCFS2_DELETE_INODE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -629,7 +628,7 @@ static int ocfs2_remove_inode(struct inode *inode, mlog_errno(status); bail_commit: - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); bail_unlock: ocfs2_meta_unlock(inode_alloc_inode, 1); mutex_unlock(&inode_alloc_inode->i_mutex); @@ -705,7 +704,7 @@ static int ocfs2_wipe_inode(struct inode *inode, * delete_inode operation. We do this now to avoid races with * recovery completion on other nodes. */ mutex_lock(&orphan_dir_inode->i_mutex); - status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1); + status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1); if (status < 0) { mutex_unlock(&orphan_dir_inode->i_mutex); @@ -933,7 +932,7 @@ void ocfs2_delete_inode(struct inode *inode) * allocation lock here as it won't be needed - nobody will * have the file open. */ - status = ocfs2_meta_lock(inode, NULL, &di_bh, 1); + status = ocfs2_meta_lock(inode, &di_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); @@ -1067,12 +1066,6 @@ void ocfs2_clear_inode(struct inode *inode) mlog_bug_on_msg(oi->ip_open_count, "Clear inode of %llu has open count %d\n", (unsigned long long)oi->ip_blkno, oi->ip_open_count); - mlog_bug_on_msg(!list_empty(&oi->ip_handle_list), - "Clear inode of %llu has non empty handle list\n", - (unsigned long long)oi->ip_blkno); - mlog_bug_on_msg(oi->ip_handle, - "Clear inode of %llu has non empty handle pointer\n", - (unsigned long long)oi->ip_blkno); /* Clear all other flags. */ oi->ip_flags = OCFS2_INODE_CACHE_INLINE; @@ -1186,7 +1179,7 @@ int ocfs2_inode_revalidate(struct dentry *dentry) /* Let ocfs2_meta_lock do the work of updating our struct * inode for us. */ - status = ocfs2_meta_lock(inode, NULL, NULL, 0); + status = ocfs2_meta_lock(inode, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); @@ -1204,7 +1197,7 @@ bail: * struct inode. * Only takes ip_lock. */ -int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle, +int ocfs2_mark_inode_dirty(handle_t *handle, struct inode *inode, struct buffer_head *bh) { diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 9957810fdf85..1a7dd2945b34 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -48,13 +48,6 @@ struct ocfs2_inode_info struct mutex ip_io_mutex; - /* Used by the journalling code to attach an inode to a - * handle. These are protected by ip_io_mutex in order to lock - * out other I/O to the inode until we either commit or - * abort. 
*/ - struct list_head ip_handle_list; - struct ocfs2_journal_handle *ip_handle; - u32 ip_flags; /* see below */ u32 ip_attr; /* inode attributes */ @@ -113,7 +106,7 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL) #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL) -extern kmem_cache_t *ocfs2_inode_cache; +extern struct kmem_cache *ocfs2_inode_cache; extern const struct address_space_operations ocfs2_aops; @@ -143,7 +136,7 @@ ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf, void ocfs2_sync_blockdev(struct super_block *sb); void ocfs2_refresh_inode(struct inode *inode, struct ocfs2_dinode *fe); -int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle, +int ocfs2_mark_inode_dirty(handle_t *handle, struct inode *inode, struct buffer_head *bh); int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb); diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 3663cef80689..4768be5f3086 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -26,7 +26,7 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) { int status; - status = ocfs2_meta_lock(inode, NULL, NULL, 0); + status = ocfs2_meta_lock(inode, NULL, 0); if (status < 0) { mlog_errno(status); return status; @@ -43,14 +43,14 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, { struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct buffer_head *bh = NULL; unsigned oldflags; int status; mutex_lock(&inode->i_mutex); - status = ocfs2_meta_lock(inode, NULL, &bh, 1); + status = ocfs2_meta_lock(inode, &bh, 1); if (status < 0) { mlog_errno(status); goto bail; @@ -67,7 +67,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, if (!S_ISDIR(inode->i_mode)) flags &= ~OCFS2_DIRSYNC_FL; - handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); @@ -96,7 +96,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, if (status < 0) mlog_errno(status); - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); bail_unlock: ocfs2_meta_unlock(inode, 1); bail: diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index fd9734def551..1d7f4ab1e5ed 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -57,9 +57,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, static int __ocfs2_recovery_thread(void *arg); static int ocfs2_commit_cache(struct ocfs2_super *osb); static int ocfs2_wait_on_mount(struct ocfs2_super *osb); -static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal, - struct ocfs2_journal_handle *handle); -static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle); static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, int dirty); static int ocfs2_trylock_journal(struct ocfs2_super *osb, @@ -113,46 +110,18 @@ finally: return status; } -struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb) -{ - struct ocfs2_journal_handle *retval = NULL; - - retval = kcalloc(1, sizeof(*retval), GFP_NOFS); - if (!retval) { - mlog(ML_ERROR, "Failed to allocate memory for journal " - "handle!\n"); - return NULL; - } - - retval->max_buffs = 0; - retval->num_locks = 0; - retval->k_handle = NULL; - - 
INIT_LIST_HEAD(&retval->locks); - INIT_LIST_HEAD(&retval->inode_list); - retval->journal = osb->journal; - - return retval; -} - /* pass it NULL and it will allocate a new handle object for you. If * you pass it a handle however, it may still return error, in which * case it has free'd the passed handle for you. */ -struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, - int max_buffs) +handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) { - int ret; journal_t *journal = osb->journal->j_journal; - - mlog_entry("(max_buffs = %d)\n", max_buffs); + handle_t *handle; BUG_ON(!osb || !osb->journal->j_journal); - if (ocfs2_is_hard_readonly(osb)) { - ret = -EROFS; - goto done_free; - } + if (ocfs2_is_hard_readonly(osb)) + return ERR_PTR(-EROFS); BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); BUG_ON(max_buffs <= 0); @@ -163,154 +132,39 @@ struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb, BUG(); } - if (!handle) - handle = ocfs2_alloc_handle(osb); - if (!handle) { - ret = -ENOMEM; - mlog(ML_ERROR, "Failed to allocate memory for journal " - "handle!\n"); - goto done_free; - } - - handle->max_buffs = max_buffs; - down_read(&osb->journal->j_trans_barrier); - /* actually start the transaction now */ - handle->k_handle = journal_start(journal, max_buffs); - if (IS_ERR(handle->k_handle)) { + handle = journal_start(journal, max_buffs); + if (IS_ERR(handle)) { up_read(&osb->journal->j_trans_barrier); - ret = PTR_ERR(handle->k_handle); - handle->k_handle = NULL; - mlog_errno(ret); + mlog_errno(PTR_ERR(handle)); if (is_journal_aborted(journal)) { ocfs2_abort(osb->sb, "Detected aborted journal"); - ret = -EROFS; + handle = ERR_PTR(-EROFS); } - goto done_free; - } - - atomic_inc(&(osb->journal->j_num_trans)); - handle->flags |= OCFS2_HANDLE_STARTED; + } else + atomic_inc(&(osb->journal->j_num_trans)); - mlog_exit_ptr(handle); return handle; - -done_free: - if (handle) - ocfs2_commit_unstarted_handle(handle); /* will kfree handle */ - - mlog_exit(ret); - return ERR_PTR(ret); -} - -void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle, - struct inode *inode) -{ - BUG_ON(!handle); - BUG_ON(!inode); - - atomic_inc(&inode->i_count); - - /* we're obviously changing it... */ - mutex_lock(&inode->i_mutex); - - /* sanity check */ - BUG_ON(OCFS2_I(inode)->ip_handle); - BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list)); - - OCFS2_I(inode)->ip_handle = handle; - list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list)); -} - -static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle) -{ - struct list_head *p, *n; - struct inode *inode; - struct ocfs2_inode_info *oi; - - list_for_each_safe(p, n, &handle->inode_list) { - oi = list_entry(p, struct ocfs2_inode_info, - ip_handle_list); - inode = &oi->vfs_inode; - - OCFS2_I(inode)->ip_handle = NULL; - list_del_init(&OCFS2_I(inode)->ip_handle_list); - - mutex_unlock(&inode->i_mutex); - iput(inode); - } -} - -/* This is trivial so we do it out of the main commit - * paths. Beware, it can be called from start_trans too! */ -static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle) -{ - mlog_entry_void(); - - BUG_ON(handle->flags & OCFS2_HANDLE_STARTED); - - ocfs2_handle_unlock_inodes(handle); - /* You are allowed to add journal locks before the transaction - * has started. 
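
The rewritten ocfs2_start_trans() now hands back the jbd handle_t directly and reports failure through the kernel's error-pointer convention: ERR_PTR(-EROFS) on a hard-readonly filesystem, or the error pointer produced by journal_start(); that is why IS_ERR()/PTR_ERR() checks show up throughout the callers in this series. The following userspace illustration shows how that encoding works — the helper definitions mirror the kernel's in spirit, and the start_trans/handle names here are stand-ins, not the ocfs2 functions.

/* ERR_PTR/IS_ERR/PTR_ERR illustration: a small negative errno is encoded
 * at the top of the pointer range, so one return value can carry either
 * a valid pointer or an error code. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct handle { int credits; };			/* illustrative stand-in */

static struct handle *start_trans(int max_buffs, int hard_readonly)
{
	static struct handle h;

	if (hard_readonly)
		return ERR_PTR(-EROFS);		/* mirrors the new ocfs2_start_trans() */
	h.credits = max_buffs;
	return &h;
}

int main(void)
{
	struct handle *h = start_trans(8, 1);

	if (IS_ERR(h))
		printf("start_trans failed: %ld\n", PTR_ERR(h));

	h = start_trans(8, 0);
	if (!IS_ERR(h))
		printf("got handle with %d credits\n", h->credits);
	return 0;
}
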
*/ - ocfs2_handle_cleanup_locks(handle->journal, handle); - - kfree(handle); - - mlog_exit_void(); } -void ocfs2_commit_trans(struct ocfs2_journal_handle *handle) +int ocfs2_commit_trans(struct ocfs2_super *osb, + handle_t *handle) { - handle_t *jbd_handle; - int retval; - struct ocfs2_journal *journal = handle->journal; - - mlog_entry_void(); + int ret; + struct ocfs2_journal *journal = osb->journal; BUG_ON(!handle); - if (!(handle->flags & OCFS2_HANDLE_STARTED)) { - ocfs2_commit_unstarted_handle(handle); - mlog_exit_void(); - return; - } - - /* release inode semaphores we took during this transaction */ - ocfs2_handle_unlock_inodes(handle); - - /* ocfs2_extend_trans may have had to call journal_restart - * which will always commit the transaction, but may return - * error for any number of reasons. If this is the case, we - * clear k_handle as it's not valid any more. */ - if (handle->k_handle) { - jbd_handle = handle->k_handle; - - if (handle->flags & OCFS2_HANDLE_SYNC) - jbd_handle->h_sync = 1; - else - jbd_handle->h_sync = 0; - - /* actually stop the transaction. if we've set h_sync, - * it'll have been committed when we return */ - retval = journal_stop(jbd_handle); - if (retval < 0) { - mlog_errno(retval); - mlog(ML_ERROR, "Could not commit transaction\n"); - BUG(); - } - - handle->k_handle = NULL; /* it's been free'd in journal_stop */ - } - - ocfs2_handle_cleanup_locks(journal, handle); + ret = journal_stop(handle); + if (ret < 0) + mlog_errno(ret); up_read(&journal->j_trans_barrier); - kfree(handle); - mlog_exit_void(); + return ret; } /* @@ -326,20 +180,18 @@ void ocfs2_commit_trans(struct ocfs2_journal_handle *handle) * good because transaction ids haven't yet been recorded on the * cluster locks associated with this handle. */ -int ocfs2_extend_trans(struct ocfs2_journal_handle *handle, - int nblocks) +int ocfs2_extend_trans(handle_t *handle, int nblocks) { int status; BUG_ON(!handle); - BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); BUG_ON(!nblocks); mlog_entry_void(); mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); - status = journal_extend(handle->k_handle, nblocks); + status = journal_extend(handle, nblocks); if (status < 0) { mlog_errno(status); goto bail; @@ -347,15 +199,12 @@ int ocfs2_extend_trans(struct ocfs2_journal_handle *handle, if (status > 0) { mlog(0, "journal_extend failed, trying journal_restart\n"); - status = journal_restart(handle->k_handle, nblocks); + status = journal_restart(handle, nblocks); if (status < 0) { - handle->k_handle = NULL; mlog_errno(status); goto bail; } - handle->max_buffs = nblocks; - } else - handle->max_buffs += nblocks; + } status = 0; bail: @@ -364,7 +213,7 @@ bail: return status; } -int ocfs2_journal_access(struct ocfs2_journal_handle *handle, +int ocfs2_journal_access(handle_t *handle, struct inode *inode, struct buffer_head *bh, int type) @@ -374,7 +223,6 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, BUG_ON(!inode); BUG_ON(!handle); BUG_ON(!bh); - BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", (unsigned long long)bh->b_blocknr, type, @@ -403,11 +251,11 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, switch (type) { case OCFS2_JOURNAL_ACCESS_CREATE: case OCFS2_JOURNAL_ACCESS_WRITE: - status = journal_get_write_access(handle->k_handle, bh); + status = journal_get_write_access(handle, bh); break; case OCFS2_JOURNAL_ACCESS_UNDO: - status = journal_get_undo_access(handle->k_handle, bh); + status = 
journal_get_undo_access(handle, bh); break; default: @@ -424,17 +272,15 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, return status; } -int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle, +int ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh) { int status; - BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); - mlog_entry("(bh->b_blocknr=%llu)\n", (unsigned long long)bh->b_blocknr); - status = journal_dirty_metadata(handle->k_handle, bh); + status = journal_dirty_metadata(handle, bh); if (status < 0) mlog(ML_ERROR, "Could not dirty metadata buffer. " "(bh->b_blocknr=%llu)\n", @@ -456,59 +302,6 @@ int ocfs2_journal_dirty_data(handle_t *handle, return err; } -/* We always assume you're adding a metadata lock at level 'ex' */ -int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle, - struct inode *inode) -{ - int status; - struct ocfs2_journal_lock *lock; - - BUG_ON(!inode); - - lock = kmem_cache_alloc(ocfs2_lock_cache, GFP_NOFS); - if (!lock) { - status = -ENOMEM; - mlog_errno(-ENOMEM); - goto bail; - } - - if (!igrab(inode)) - BUG(); - lock->jl_inode = inode; - - list_add_tail(&(lock->jl_lock_list), &(handle->locks)); - handle->num_locks++; - - status = 0; -bail: - mlog_exit(status); - return status; -} - -static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal, - struct ocfs2_journal_handle *handle) -{ - struct list_head *p, *n; - struct ocfs2_journal_lock *lock; - struct inode *inode; - - list_for_each_safe(p, n, &(handle->locks)) { - lock = list_entry(p, struct ocfs2_journal_lock, - jl_lock_list); - list_del(&lock->jl_lock_list); - handle->num_locks--; - - inode = lock->jl_inode; - ocfs2_meta_unlock(inode, 1); - if (atomic_read(&inode->i_count) == 1) - mlog(ML_ERROR, - "Inode %llu, I'm doing a last iput for!", - (unsigned long long)OCFS2_I(inode)->ip_blkno); - iput(inode); - kmem_cache_free(ocfs2_lock_cache, lock); - } -} - #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * 5) void ocfs2_set_journal_params(struct ocfs2_super *osb) @@ -562,8 +355,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) /* Skip recovery waits here - journal inode metadata never * changes in a live cluster so it can be considered an * exception to the rule. */ - status = ocfs2_meta_lock_full(inode, NULL, &bh, 1, - OCFS2_META_LOCK_RECOVERY); + status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); if (status < 0) { if (status != -ERESTARTSYS) mlog(ML_ERROR, "Could not get lock on journal!\n"); @@ -911,11 +703,12 @@ struct ocfs2_la_recovery_item { * NOTE: This function can and will sleep on recovery of other nodes * during cluster locking, just like any other ocfs2 process. 
*/ -void ocfs2_complete_recovery(void *data) +void ocfs2_complete_recovery(struct work_struct *work) { int ret; - struct ocfs2_super *osb = data; - struct ocfs2_journal *journal = osb->journal; + struct ocfs2_journal *journal = + container_of(work, struct ocfs2_journal, j_recovery_work); + struct ocfs2_super *osb = journal->j_osb; struct ocfs2_dinode *la_dinode, *tl_dinode; struct ocfs2_la_recovery_item *item; struct list_head *p, *n; @@ -1160,8 +953,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, } SET_INODE_JOURNAL(inode); - status = ocfs2_meta_lock_full(inode, NULL, &bh, 1, - OCFS2_META_LOCK_RECOVERY); + status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); if (status < 0) { mlog(0, "status returned from ocfs2_meta_lock=%d\n", status); if (status != -ERESTARTSYS) @@ -1350,7 +1142,7 @@ static int ocfs2_trylock_journal(struct ocfs2_super *osb, SET_INODE_JOURNAL(inode); flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE; - status = ocfs2_meta_lock_full(inode, NULL, NULL, 1, flags); + status = ocfs2_meta_lock_full(inode, NULL, 1, flags); if (status < 0) { if (status != -EAGAIN) mlog_errno(status); @@ -1433,7 +1225,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, } mutex_lock(&orphan_dir_inode->i_mutex); - status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0); + status = ocfs2_meta_lock(orphan_dir_inode, NULL, 0); if (status < 0) { mlog_errno(status); goto out; diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 2f3a6acdac45..899112ad8136 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -37,7 +37,6 @@ enum ocfs2_journal_state { struct ocfs2_super; struct ocfs2_dinode; -struct ocfs2_journal_handle; struct ocfs2_journal { enum ocfs2_journal_state j_state; /* Journals current state */ @@ -133,46 +132,8 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, spin_unlock(&trans_inc_lock); } -extern kmem_cache_t *ocfs2_lock_cache; - -struct ocfs2_journal_lock { - struct inode *jl_inode; - struct list_head jl_lock_list; -}; - -struct ocfs2_journal_handle { - handle_t *k_handle; /* kernel handle. */ - struct ocfs2_journal *journal; - u32 flags; /* see flags below. */ - int max_buffs; /* Buffs reserved by this handle */ - - /* The following two fields are for ocfs2_handle_add_lock */ - int num_locks; - struct list_head locks; /* A bunch of locks to - * release on commit. This - * should be a list_head */ - - struct list_head inode_list; -}; - -#define OCFS2_HANDLE_STARTED 1 -/* should we sync-commit this handle? */ -#define OCFS2_HANDLE_SYNC 2 -static inline int ocfs2_handle_started(struct ocfs2_journal_handle *handle) -{ - return handle->flags & OCFS2_HANDLE_STARTED; -} - -static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, int sync) -{ - if (sync) - handle->flags |= OCFS2_HANDLE_SYNC; - else - handle->flags &= ~OCFS2_HANDLE_SYNC; -} - /* Exported only for the journal struct init code in super.c. Do not call. */ -void ocfs2_complete_recovery(void *data); +void ocfs2_complete_recovery(struct work_struct *work); /* * Journal Control: @@ -231,15 +192,14 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode) * Transaction Handling: * Manage the lifetime of a transaction handle. * - * ocfs2_alloc_handle - Only allocate a handle so we can start putting - * cluster locks on it. To actually change blocks, - * call ocfs2_start_trans with the handle returned - * from this function. You may call ocfs2_commit_trans - * at any time in the lifetime of a handle. 
* ocfs2_start_trans - Begin a transaction. Give it an upper estimate of * the number of blocks that will be changed during * this handle. - * ocfs2_commit_trans - Complete a handle. + * ocfs2_commit_trans - Complete a handle. It might return -EIO if + * the journal was aborted. The majority of paths don't + * check the return value as an error there comes too + * late to do anything (and will be picked up in a + * later transaction). * ocfs2_extend_trans - Extend a handle by nblocks credits. This may * commit the handle to disk in the process, but will * not release any locks taken during the transaction. @@ -249,24 +209,16 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode) * ocfs2_journal_dirty - Mark a journalled buffer as having dirty data. * ocfs2_journal_dirty_data - Indicate that a data buffer should go out before * the current handle commits. - * ocfs2_handle_add_lock - Sometimes we need to delay lock release - * until after a transaction has been completed. Use - * ocfs2_handle_add_lock to indicate that a lock needs - * to be released at the end of that handle. Locks - * will be released in the order that they are added. - * ocfs2_handle_add_inode - Add a locked inode to a transaction. */ /* You must always start_trans with a number of buffs > 0, but it's * perfectly legal to go through an entire transaction without having * dirtied any buffers. */ -struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb); -struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, +handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs); -void ocfs2_commit_trans(struct ocfs2_journal_handle *handle); -int ocfs2_extend_trans(struct ocfs2_journal_handle *handle, - int nblocks); +int ocfs2_commit_trans(struct ocfs2_super *osb, + handle_t *handle); +int ocfs2_extend_trans(handle_t *handle, int nblocks); /* * Create access is for when we get a newly created buffer and we're @@ -283,7 +235,7 @@ int ocfs2_extend_trans(struct ocfs2_journal_handle *handle, #define OCFS2_JOURNAL_ACCESS_WRITE 1 #define OCFS2_JOURNAL_ACCESS_UNDO 2 -int ocfs2_journal_access(struct ocfs2_journal_handle *handle, +int ocfs2_journal_access(handle_t *handle, struct inode *inode, struct buffer_head *bh, int type); @@ -306,18 +258,10 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, * <modify the bh> * ocfs2_journal_dirty(handle, bh); */ -int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle, +int ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh); int ocfs2_journal_dirty_data(handle_t *handle, struct buffer_head *bh); -int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle, - struct inode *inode); -/* - * Use this to protect from other processes reading buffer state while - * it's in flight. 
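
The updated documentation block above describes the simplified lifecycle: reserve credits with ocfs2_start_trans(), declare each buffer with ocfs2_journal_access(), mark it with ocfs2_journal_dirty(), and close with ocfs2_commit_trans(), which can now pass back -EIO from journal_stop(). Below is a kernel-style sketch of that sequence as a caller would write it after this patch; it is a shape illustration only (it will not build outside the ocfs2 tree), every call in it appears in this diff, and the function name example_update is made up.

/* Post-patch transaction shape; abbreviated error handling. */
static int example_update(struct ocfs2_super *osb, struct inode *inode,
			  struct buffer_head *bh)
{
	handle_t *handle;
	int status;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0)
		goto out_commit;

	/* ... modify the dinode held in bh here ... */

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0)
		mlog_errno(status);

out_commit:
	ocfs2_commit_trans(osb, handle);	/* may return -EIO; commonly ignored */
	return status;
}
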
- */ -void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle, - struct inode *inode); /* * Credit Macros: diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 1f17a4d08287..698d79a74ef8 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -58,19 +58,18 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc); static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_dinode *alloc, struct inode *main_bm_inode, struct buffer_head *main_bm_bh); static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_alloc_context **ac, struct inode **bitmap_inode, struct buffer_head **bitmap_bh); static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac); static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, @@ -196,7 +195,7 @@ bail: void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) { int status; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle; struct inode *local_alloc_inode = NULL; struct buffer_head *bh = NULL; struct buffer_head *main_bm_bh = NULL; @@ -207,7 +206,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) mlog_entry_void(); if (osb->local_alloc_state == OCFS2_LA_UNUSED) - goto bail; + goto out; local_alloc_inode = ocfs2_get_system_file_inode(osb, @@ -216,40 +215,34 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) if (!local_alloc_inode) { status = -ENOENT; mlog_errno(status); - goto bail; + goto out; } osb->local_alloc_state = OCFS2_LA_DISABLED; - handle = ocfs2_alloc_handle(osb); - if (!handle) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { status = -EINVAL; mlog_errno(status); - goto bail; + goto out; } - ocfs2_handle_add_inode(handle, main_bm_inode); - status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1); + mutex_lock(&main_bm_inode->i_mutex); + + status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1); if (status < 0) { mlog_errno(status); - goto bail; + goto out_mutex; } /* WINDOW_MOVE_CREDITS is a bit heavy... 
*/ - handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); if (IS_ERR(handle)) { mlog_errno(PTR_ERR(handle)); handle = NULL; - goto bail; + goto out_unlock; } bh = osb->local_alloc_bh; @@ -258,7 +251,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) alloc_copy = kmalloc(bh->b_size, GFP_KERNEL); if (!alloc_copy) { status = -ENOMEM; - goto bail; + goto out_commit; } memcpy(alloc_copy, alloc, bh->b_size); @@ -266,7 +259,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); - goto bail; + goto out_commit; } ocfs2_clear_local_alloc(alloc); @@ -274,7 +267,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) status = ocfs2_journal_dirty(handle, bh); if (status < 0) { mlog_errno(status); - goto bail; + goto out_commit; } brelse(bh); @@ -286,16 +279,20 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) if (status < 0) mlog_errno(status); -bail: - if (handle) - ocfs2_commit_trans(handle); +out_commit: + ocfs2_commit_trans(osb, handle); +out_unlock: if (main_bm_bh) brelse(main_bm_bh); - if (main_bm_inode) - iput(main_bm_inode); + ocfs2_meta_unlock(main_bm_inode, 1); +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); + iput(main_bm_inode); + +out: if (local_alloc_inode) iput(local_alloc_inode); @@ -385,61 +382,59 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb, struct ocfs2_dinode *alloc) { int status; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle; struct buffer_head *main_bm_bh = NULL; - struct inode *main_bm_inode = NULL; + struct inode *main_bm_inode; mlog_entry_void(); - handle = ocfs2_alloc_handle(osb); - if (!handle) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { status = -EINVAL; mlog_errno(status); - goto bail; + goto out; } - ocfs2_handle_add_inode(handle, main_bm_inode); - status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1); + mutex_lock(&main_bm_inode->i_mutex); + + status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1); if (status < 0) { mlog_errno(status); - goto bail; + goto out_mutex; } - handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; mlog_errno(status); - goto bail; + goto out_unlock; } /* we want the bitmap change to be recorded on disk asap */ - ocfs2_handle_set_sync(handle, 1); + handle->h_sync = 1; status = ocfs2_sync_local_to_main(osb, handle, alloc, main_bm_inode, main_bm_bh); if (status < 0) mlog_errno(status); -bail: - if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); + +out_unlock: + ocfs2_meta_unlock(main_bm_inode, 1); + +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); if (main_bm_bh) brelse(main_bm_bh); - if (main_bm_inode) - iput(main_bm_inode); + iput(main_bm_inode); +out: mlog_exit(status); return status; } @@ -452,7 +447,6 @@ bail: * our own in order to shift windows. 
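The localalloc.c hunks above converge on one calling pattern: take the cluster lock outside the transaction, start a plain jbd handle_t, journal the buffer, commit, and only then drop the lock, since the handle no longer releases locks on commit. Below is a minimal sketch of that pattern, assuming the fs/ocfs2 internal headers; the function name and the use of OCFS2_INODE_UPDATE_CREDITS are illustrative, not part of the patch.

/* Sketch only: assumes "ocfs2.h", "journal.h" and "dlmglue.h" from fs/ocfs2. */
static int example_update_inode(struct ocfs2_super *osb, struct inode *inode)
{
	handle_t *handle;
	struct buffer_head *bh = NULL;
	int status;

	/* 1. Cluster lock first; it is no longer tracked by the handle. */
	status = ocfs2_meta_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	/* 2. Start a bare jbd handle; there is no ocfs2_alloc_handle() step. */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_unlock;
	}

	/* 3. Declare write intent and mark the buffer dirty (the actual
	 *    edit of bh->b_data is elided here). */
	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (!status)
		status = ocfs2_journal_dirty(handle, bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);
out_unlock:
	/* 4. The caller, not the handle, drops the cluster lock. */
	ocfs2_meta_unlock(inode, 1);
	brelse(bh);
	return status;
}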
*/ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, - struct ocfs2_journal_handle *passed_handle, u32 bits_wanted, struct ocfs2_alloc_context *ac) { @@ -463,9 +457,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, mlog_entry_void(); - BUG_ON(!passed_handle); BUG_ON(!ac); - BUG_ON(passed_handle->flags & OCFS2_HANDLE_STARTED); local_alloc_inode = ocfs2_get_system_file_inode(osb, @@ -476,7 +468,11 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, mlog_errno(status); goto bail; } - ocfs2_handle_add_inode(passed_handle, local_alloc_inode); + + mutex_lock(&local_alloc_inode->i_mutex); + + ac->ac_inode = local_alloc_inode; + ac->ac_which = OCFS2_AC_USE_LOCAL; if (osb->local_alloc_state != OCFS2_LA_ENABLED) { status = -ENOSPC; @@ -515,21 +511,17 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, } } - ac->ac_inode = igrab(local_alloc_inode); get_bh(osb->local_alloc_bh); ac->ac_bh = osb->local_alloc_bh; - ac->ac_which = OCFS2_AC_USE_LOCAL; status = 0; bail: - if (local_alloc_inode) - iput(local_alloc_inode); mlog_exit(status); return status; } int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 min_bits, u32 *bit_off, @@ -707,7 +699,7 @@ static void ocfs2_verify_zero_bits(unsigned long *bitmap, * passed is used for caching. */ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_dinode *alloc, struct inode *main_bm_inode, struct buffer_head *main_bm_bh) @@ -778,7 +770,6 @@ bail: } static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_alloc_context **ac, struct inode **bitmap_inode, struct buffer_head **bitmap_bh) @@ -792,7 +783,6 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, goto bail; } - (*ac)->ac_handle = handle; (*ac)->ac_bits_wanted = ocfs2_local_alloc_window_bits(osb); status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); @@ -821,7 +811,7 @@ bail: * pass it the bitmap lock in lock_bh if you have it. */ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac) { int status = 0; @@ -888,23 +878,15 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, int status = 0; struct buffer_head *main_bm_bh = NULL; struct inode *main_bm_inode = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_dinode *alloc; struct ocfs2_dinode *alloc_copy = NULL; struct ocfs2_alloc_context *ac = NULL; mlog_entry_void(); - handle = ocfs2_alloc_handle(osb); - if (!handle) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - /* This will lock the main bitmap for us. 
*/ status = ocfs2_local_alloc_reserve_for_window(osb, - handle, &ac, &main_bm_inode, &main_bm_bh); @@ -914,7 +896,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, goto bail; } - handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -972,7 +954,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, status = 0; bail: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); if (main_bm_bh) brelse(main_bm_bh); diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h index 30f88ce14e46..385a10152f9c 100644 --- a/fs/ocfs2/localalloc.h +++ b/fs/ocfs2/localalloc.h @@ -42,12 +42,11 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, struct ocfs2_alloc_context; int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, - struct ocfs2_journal_handle *passed_handle, u32 bits_wanted, struct ocfs2_alloc_context *ac); int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 min_bits, u32 *bit_off, diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 83934e33e5b0..69f85ae392dc 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -82,6 +82,8 @@ static struct vm_operations_struct ocfs2_file_vm_ops = { int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) { + int ret = 0, lock_level = 0; + /* We don't want to support shared writable mappings yet. */ if (((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE)) && ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) { @@ -91,7 +93,14 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; } - file_accessed(file); + ret = ocfs2_meta_lock_atime(file->f_dentry->d_inode, + file->f_vfsmnt, &lock_level); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level); +out: vma->vm_ops = &ocfs2_file_vm_ops; return 0; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index a57b751d4f40..21db45ddf144 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -75,12 +75,12 @@ static int inline ocfs2_search_dirblock(struct buffer_head *bh, unsigned long offset, struct ocfs2_dir_entry **res_dir); -static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle, +static int ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh); -static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle, +static int __ocfs2_add_entry(handle_t *handle, struct inode *dir, const char *name, int namelen, struct inode *inode, u64 blkno, @@ -93,43 +93,37 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode **ret_inode, struct ocfs2_alloc_context *inode_ac); static int ocfs2_fill_new_dir(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac); -static int ocfs2_double_lock(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, - struct buffer_head **bh1, - struct inode *inode1, - struct buffer_head **bh2, - struct inode *inode2); - static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + struct 
inode **ret_orphan_dir, struct inode *inode, char *name, struct buffer_head **de_bh); static int ocfs2_orphan_add(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct ocfs2_dinode *fe, char *name, - struct buffer_head *de_bh); + struct buffer_head *de_bh, + struct inode *orphan_dir_inode); static int ocfs2_create_symlink_data(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, const char *symname); -static inline int ocfs2_add_entry(struct ocfs2_journal_handle *handle, +static inline int ocfs2_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode, u64 blkno, struct buffer_head *parent_fe_bh, @@ -165,7 +159,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len, dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); - status = ocfs2_meta_lock(dir, NULL, NULL, 0); + status = ocfs2_meta_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); @@ -242,7 +236,7 @@ bail: } static int ocfs2_fill_new_dir(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, @@ -317,7 +311,7 @@ static int ocfs2_mknod(struct inode *dir, { int status = 0; struct buffer_head *parent_fe_bh = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_super *osb; struct ocfs2_dinode *dirfe; struct buffer_head *new_fe_bh = NULL; @@ -333,18 +327,11 @@ static int ocfs2_mknod(struct inode *dir, /* get our super block */ osb = OCFS2_SB(dir->i_sb); - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto leave; - } - - status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1); + status = ocfs2_meta_lock(dir, &parent_fe_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); - goto leave; + return status; } if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) { @@ -374,7 +361,7 @@ static int ocfs2_mknod(struct inode *dir, } /* reserve an inode spot */ - status = ocfs2_reserve_new_inode(osb, handle, &inode_ac); + status = ocfs2_reserve_new_inode(osb, &inode_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -384,7 +371,7 @@ static int ocfs2_mknod(struct inode *dir, /* are we making a directory? If so, reserve a cluster for his * 1st extent. 
*/ if (S_ISDIR(mode)) { - status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac); + status = ocfs2_reserve_clusters(osb, 1, &data_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -392,7 +379,7 @@ static int ocfs2_mknod(struct inode *dir, } } - handle = ocfs2_start_trans(osb, handle, OCFS2_MKNOD_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -453,7 +440,9 @@ static int ocfs2_mknod(struct inode *dir, status = 0; leave: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); + + ocfs2_meta_unlock(dir, 1); if (status == -ENOSPC) mlog(0, "Disk is full\n"); @@ -487,7 +476,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode **ret_inode, struct ocfs2_alloc_context *inode_ac) { @@ -653,7 +642,7 @@ static int ocfs2_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle; struct inode *inode = old_dentry->d_inode; int err; struct buffer_head *fe_bh = NULL; @@ -666,68 +655,60 @@ static int ocfs2_link(struct dentry *old_dentry, old_dentry->d_name.len, old_dentry->d_name.name, dentry->d_name.len, dentry->d_name.name); - if (S_ISDIR(inode->i_mode)) { - err = -EPERM; - goto bail; - } - - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - err = -ENOMEM; - goto bail; - } + if (S_ISDIR(inode->i_mode)) + return -EPERM; - err = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1); + err = ocfs2_meta_lock(dir, &parent_fe_bh, 1); if (err < 0) { if (err != -ENOENT) mlog_errno(err); - goto bail; + return err; } if (!dir->i_nlink) { err = -ENOENT; - goto bail; + goto out; } err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, dentry->d_name.len); if (err) - goto bail; + goto out; err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh, dentry->d_name.name, dentry->d_name.len, &de_bh); if (err < 0) { mlog_errno(err); - goto bail; + goto out; } - err = ocfs2_meta_lock(inode, handle, &fe_bh, 1); + err = ocfs2_meta_lock(inode, &fe_bh, 1); if (err < 0) { if (err != -ENOENT) mlog_errno(err); - goto bail; + goto out; } fe = (struct ocfs2_dinode *) fe_bh->b_data; if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) { err = -EMLINK; - goto bail; + goto out_unlock_inode; } - handle = ocfs2_start_trans(osb, handle, OCFS2_LINK_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_LINK_CREDITS); if (IS_ERR(handle)) { err = PTR_ERR(handle); handle = NULL; mlog_errno(err); - goto bail; + goto out_unlock_inode; } err = ocfs2_journal_access(handle, inode, fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (err < 0) { mlog_errno(err); - goto bail; + goto out_commit; } inc_nlink(inode); @@ -741,7 +722,7 @@ static int ocfs2_link(struct dentry *old_dentry, le16_add_cpu(&fe->i_links_count, -1); drop_nlink(inode); mlog_errno(err); - goto bail; + goto out_commit; } err = ocfs2_add_entry(handle, dentry, inode, @@ -751,21 +732,27 @@ static int ocfs2_link(struct dentry *old_dentry, le16_add_cpu(&fe->i_links_count, -1); drop_nlink(inode); mlog_errno(err); - goto bail; + goto out_commit; } err = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno); if (err) { mlog_errno(err); - goto bail; + goto out_commit; } atomic_inc(&inode->i_count); dentry->d_op = &ocfs2_dentry_ops; d_instantiate(dentry, inode); -bail: - if (handle) - ocfs2_commit_trans(handle); + +out_commit: + 
ocfs2_commit_trans(osb, handle); +out_unlock_inode: + ocfs2_meta_unlock(inode, 1); + +out: + ocfs2_meta_unlock(dir, 1); + if (de_bh) brelse(de_bh); if (fe_bh) @@ -812,13 +799,15 @@ static int ocfs2_unlink(struct inode *dir, struct dentry *dentry) { int status; + int child_locked = 0; struct inode *inode = dentry->d_inode; + struct inode *orphan_dir = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); u64 blkno; struct ocfs2_dinode *fe = NULL; struct buffer_head *fe_bh = NULL; struct buffer_head *parent_node_bh = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_dir_entry *dirent = NULL; struct buffer_head *dirent_bh = NULL; char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; @@ -833,22 +822,14 @@ static int ocfs2_unlink(struct inode *dir, if (inode == osb->root_inode) { mlog(0, "Cannot delete the root directory\n"); - status = -EPERM; - goto leave; + return -EPERM; } - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto leave; - } - - status = ocfs2_meta_lock(dir, handle, &parent_node_bh, 1); + status = ocfs2_meta_lock(dir, &parent_node_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); - goto leave; + return status; } status = ocfs2_find_files_on_disk(dentry->d_name.name, @@ -869,12 +850,13 @@ static int ocfs2_unlink(struct inode *dir, goto leave; } - status = ocfs2_meta_lock(inode, handle, &fe_bh, 1); + status = ocfs2_meta_lock(inode, &fe_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); goto leave; } + child_locked = 1; if (S_ISDIR(inode->i_mode)) { if (!ocfs2_empty_dir(inode)) { @@ -895,7 +877,7 @@ static int ocfs2_unlink(struct inode *dir, } if (inode_is_unlinkable(inode)) { - status = ocfs2_prepare_orphan_dir(osb, handle, inode, + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode, orphan_name, &orphan_entry_bh); if (status < 0) { @@ -904,7 +886,7 @@ static int ocfs2_unlink(struct inode *dir, } } - handle = ocfs2_start_trans(osb, handle, OCFS2_UNLINK_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_UNLINK_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -923,7 +905,7 @@ static int ocfs2_unlink(struct inode *dir, if (inode_is_unlinkable(inode)) { status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name, - orphan_entry_bh); + orphan_entry_bh, orphan_dir); if (status < 0) { mlog_errno(status); goto leave; @@ -960,7 +942,19 @@ static int ocfs2_unlink(struct inode *dir, leave: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); + + if (child_locked) + ocfs2_meta_unlock(inode, 1); + + ocfs2_meta_unlock(dir, 1); + + if (orphan_dir) { + /* This was locked for us in ocfs2_prepare_orphan_dir() */ + ocfs2_meta_unlock(orphan_dir, 1); + mutex_unlock(&orphan_dir->i_mutex); + iput(orphan_dir); + } if (fe_bh) brelse(fe_bh); @@ -984,7 +978,6 @@ leave: * if they have the same id, then the 1st one is the only one locked. 
*/ static int ocfs2_double_lock(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct buffer_head **bh1, struct inode *inode1, struct buffer_head **bh2, @@ -1000,8 +993,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, (unsigned long long)oi1->ip_blkno, (unsigned long long)oi2->ip_blkno); - BUG_ON(!handle); - if (*bh1) *bh1 = NULL; if (*bh2) @@ -1021,25 +1012,41 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, inode1 = tmpinode; } /* lock id2 */ - status = ocfs2_meta_lock(inode2, handle, bh2, 1); + status = ocfs2_meta_lock(inode2, bh2, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); goto bail; } } + /* lock id1 */ - status = ocfs2_meta_lock(inode1, handle, bh1, 1); + status = ocfs2_meta_lock(inode1, bh1, 1); if (status < 0) { + /* + * An error return must mean that no cluster locks + * were held on function exit. + */ + if (oi1->ip_blkno != oi2->ip_blkno) + ocfs2_meta_unlock(inode2, 1); + if (status != -ENOENT) mlog_errno(status); - goto bail; } + bail: mlog_exit(status); return status; } +static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2) +{ + ocfs2_meta_unlock(inode1, 1); + + if (inode1 != inode2) + ocfs2_meta_unlock(inode2, 1); +} + #define PARENT_INO(buffer) \ ((struct ocfs2_dir_entry *) \ ((char *)buffer + \ @@ -1050,9 +1057,11 @@ static int ocfs2_rename(struct inode *old_dir, struct inode *new_dir, struct dentry *new_dentry) { - int status = 0, rename_lock = 0; + int status = 0, rename_lock = 0, parents_locked = 0; + int old_child_locked = 0, new_child_locked = 0; struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; + struct inode *orphan_dir = NULL; struct ocfs2_dinode *newfe = NULL; char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; struct buffer_head *orphan_entry_bh = NULL; @@ -1060,7 +1069,7 @@ static int ocfs2_rename(struct inode *old_dir, struct buffer_head *insert_entry_bh = NULL; struct ocfs2_super *osb = NULL; u64 newfe_blkno; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct buffer_head *old_dir_bh = NULL; struct buffer_head *new_dir_bh = NULL; struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry @@ -1105,21 +1114,14 @@ static int ocfs2_rename(struct inode *old_dir, rename_lock = 1; } - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - /* if old and new are the same, this'll just do one lock. */ - status = ocfs2_double_lock(osb, handle, - &old_dir_bh, old_dir, - &new_dir_bh, new_dir); + status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, + &new_dir_bh, new_dir); if (status < 0) { mlog_errno(status); goto bail; } + parents_locked = 1; /* make sure both dirs have bhs * get an extra ref on old_dir_bh if old==new */ @@ -1140,12 +1142,13 @@ static int ocfs2_rename(struct inode *old_dir, * the vote thread on other nodes won't have to concurrently * downconvert the inode and the dentry locks. 
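ocfs2_double_lock above orders the two cluster locks by ip_blkno, takes only one lock when the inodes coincide, and on error guarantees that no locks are left held; the new ocfs2_double_unlock is its counterpart. A hedged sketch of how a namei.c caller pairs the two (the wrapper name is hypothetical and the work in the middle is elided):

static int example_with_two_dirs(struct ocfs2_super *osb,
				 struct inode *dir1, struct inode *dir2)
{
	struct buffer_head *bh1 = NULL, *bh2 = NULL;
	int status;

	/* Locks are taken in a fixed ip_blkno order; on failure none are held. */
	status = ocfs2_double_lock(osb, &bh1, dir1, &bh2, dir2);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		return status;
	}

	/* ... modify both directories under their cluster locks ... */

	ocfs2_double_unlock(dir1, dir2);
	brelse(bh1);
	brelse(bh2);
	return 0;
}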
*/ - status = ocfs2_meta_lock(old_inode, handle, NULL, 1); + status = ocfs2_meta_lock(old_inode, NULL, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); goto bail; } + old_child_locked = 1; status = ocfs2_remote_dentry_delete(old_dentry); if (status < 0) { @@ -1231,12 +1234,13 @@ static int ocfs2_rename(struct inode *old_dir, goto bail; } - status = ocfs2_meta_lock(new_inode, handle, &newfe_bh, 1); + status = ocfs2_meta_lock(new_inode, &newfe_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); goto bail; } + new_child_locked = 1; status = ocfs2_remote_dentry_delete(new_dentry); if (status < 0) { @@ -1252,7 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir, (unsigned long long)newfe_bh->b_blocknr : 0ULL); if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) { - status = ocfs2_prepare_orphan_dir(osb, handle, + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, new_inode, orphan_name, &orphan_entry_bh); @@ -1280,7 +1284,7 @@ static int ocfs2_rename(struct inode *old_dir, } } - handle = ocfs2_start_trans(osb, handle, OCFS2_RENAME_CREDITS); + handle = ocfs2_start_trans(osb, OCFS2_RENAME_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1307,7 +1311,7 @@ static int ocfs2_rename(struct inode *old_dir, (newfe->i_links_count == cpu_to_le16(1))){ status = ocfs2_orphan_add(osb, handle, new_inode, newfe, orphan_name, - orphan_entry_bh); + orphan_entry_bh, orphan_dir); if (status < 0) { mlog_errno(status); goto bail; @@ -1424,7 +1428,23 @@ bail: ocfs2_rename_unlock(osb); if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); + + if (parents_locked) + ocfs2_double_unlock(old_dir, new_dir); + + if (old_child_locked) + ocfs2_meta_unlock(old_inode, 1); + + if (new_child_locked) + ocfs2_meta_unlock(new_inode, 1); + + if (orphan_dir) { + /* This was locked for us in ocfs2_prepare_orphan_dir() */ + ocfs2_meta_unlock(orphan_dir, 1); + mutex_unlock(&orphan_dir->i_mutex); + iput(orphan_dir); + } if (new_inode) sync_mapping_buffers(old_inode->i_mapping); @@ -1458,7 +1478,7 @@ bail: * data, including the null terminator. */ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, const char *symname) { @@ -1573,7 +1593,7 @@ static int ocfs2_symlink(struct inode *dir, struct buffer_head *parent_fe_bh = NULL; struct ocfs2_dinode *fe = NULL; struct ocfs2_dinode *dirfe; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_alloc_context *data_ac = NULL; @@ -1587,19 +1607,12 @@ static int ocfs2_symlink(struct inode *dir, credits = ocfs2_calc_symlink_credits(sb); - handle = ocfs2_alloc_handle(osb); - if (handle == NULL) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - /* lock the parent directory */ - status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1); + status = ocfs2_meta_lock(dir, &parent_fe_bh, 1); if (status < 0) { if (status != -ENOENT) mlog_errno(status); - goto bail; + return status; } dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data; @@ -1622,7 +1635,7 @@ static int ocfs2_symlink(struct inode *dir, goto bail; } - status = ocfs2_reserve_new_inode(osb, handle, &inode_ac); + status = ocfs2_reserve_new_inode(osb, &inode_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -1631,7 +1644,7 @@ static int ocfs2_symlink(struct inode *dir, /* don't reserve bitmap space for fast symlinks. 
*/ if (l > ocfs2_fast_symlink_chars(sb)) { - status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac); + status = ocfs2_reserve_clusters(osb, 1, &data_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -1639,7 +1652,7 @@ static int ocfs2_symlink(struct inode *dir, } } - handle = ocfs2_start_trans(osb, handle, credits); + handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -1717,7 +1730,10 @@ static int ocfs2_symlink(struct inode *dir, d_instantiate(dentry, inode); bail: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); + + ocfs2_meta_unlock(dir, 1); + if (new_fe_bh) brelse(new_fe_bh); if (parent_fe_bh) @@ -1768,7 +1784,7 @@ int ocfs2_check_dir_entry(struct inode * dir, * If you pass me insert_bh, I'll skip the search of the other dir * blocks and put the record in there. */ -static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle, +static int __ocfs2_add_entry(handle_t *handle, struct inode *dir, const char *name, int namelen, struct inode *inode, u64 blkno, @@ -1854,7 +1870,7 @@ bail: * ocfs2_delete_entry deletes a directory entry by merging it with the * previous entry */ -static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle, +static int ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh) @@ -2085,19 +2101,19 @@ bail: } static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + struct inode **ret_orphan_dir, struct inode *inode, char *name, struct buffer_head **de_bh) { - struct inode *orphan_dir_inode = NULL; + struct inode *orphan_dir_inode; struct buffer_head *orphan_dir_bh = NULL; int status = 0; status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); if (status < 0) { mlog_errno(status); - goto leave; + return status; } orphan_dir_inode = ocfs2_get_system_file_inode(osb, @@ -2106,11 +2122,12 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, if (!orphan_dir_inode) { status = -ENOENT; mlog_errno(status); - goto leave; + return status; } - ocfs2_handle_add_inode(handle, orphan_dir_inode); - status = ocfs2_meta_lock(orphan_dir_inode, handle, &orphan_dir_bh, 1); + mutex_lock(&orphan_dir_inode->i_mutex); + + status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1); if (status < 0) { mlog_errno(status); goto leave; @@ -2120,13 +2137,19 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, orphan_dir_bh, name, OCFS2_ORPHAN_NAMELEN, de_bh); if (status < 0) { + ocfs2_meta_unlock(orphan_dir_inode, 1); + mlog_errno(status); goto leave; } + *ret_orphan_dir = orphan_dir_inode; + leave: - if (orphan_dir_inode) + if (status) { + mutex_unlock(&orphan_dir_inode->i_mutex); iput(orphan_dir_inode); + } if (orphan_dir_bh) brelse(orphan_dir_bh); @@ -2136,28 +2159,19 @@ leave: } static int ocfs2_orphan_add(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *inode, struct ocfs2_dinode *fe, char *name, - struct buffer_head *de_bh) + struct buffer_head *de_bh, + struct inode *orphan_dir_inode) { - struct inode *orphan_dir_inode = NULL; struct buffer_head *orphan_dir_bh = NULL; int status = 0; struct ocfs2_dinode *orphan_fe; mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); - orphan_dir_inode = ocfs2_get_system_file_inode(osb, - ORPHAN_DIR_SYSTEM_INODE, - osb->slot_num); - if (!orphan_dir_inode) { - status = -ENOENT; - mlog_errno(status); - goto leave; - } - status = ocfs2_read_block(osb, 
OCFS2_I(orphan_dir_inode)->ip_blkno, &orphan_dir_bh, OCFS2_BH_CACHED, @@ -2209,9 +2223,6 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); leave: - if (orphan_dir_inode) - iput(orphan_dir_inode); - if (orphan_dir_bh) brelse(orphan_dir_bh); @@ -2221,7 +2232,7 @@ leave: /* unlike orphan_add, we expect the orphan dir to already be locked here. */ int ocfs2_orphan_del(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *orphan_dir_inode, struct inode *inode, struct buffer_head *orphan_dir_bh) @@ -2300,4 +2311,5 @@ struct inode_operations ocfs2_dir_iops = { .rename = ocfs2_rename, .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, + .permission = ocfs2_permission, }; diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h index deaaa97dbf0b..8425944fcccd 100644 --- a/fs/ocfs2/namei.h +++ b/fs/ocfs2/namei.h @@ -39,7 +39,7 @@ struct buffer_head *ocfs2_find_entry(const char *name, struct inode *dir, struct ocfs2_dir_entry **res_dir); int ocfs2_orphan_del(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct inode *orphan_dir_inode, struct inode *inode, struct buffer_head *orphan_dir_bh); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 0462a7f4e21b..b767fd7da6eb 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -34,6 +34,7 @@ #include <linux/workqueue.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/jbd.h> #include "cluster/nodemanager.h" #include "cluster/heartbeat.h" @@ -179,9 +180,9 @@ enum ocfs2_mount_options #define OCFS2_OSB_SOFT_RO 0x0001 #define OCFS2_OSB_HARD_RO 0x0002 #define OCFS2_OSB_ERROR_FS 0x0004 +#define OCFS2_DEFAULT_ATIME_QUANTUM 60 struct ocfs2_journal; -struct ocfs2_journal_handle; struct ocfs2_super { struct task_struct *commit_task; @@ -218,6 +219,7 @@ struct ocfs2_super unsigned long osb_flags; unsigned long s_mount_opt; + unsigned int s_atime_quantum; u16 max_slots; s16 node_num; @@ -283,7 +285,7 @@ struct ocfs2_super /* Truncate log info */ struct inode *osb_tl_inode; struct buffer_head *osb_tl_bh; - struct work_struct osb_truncate_log_wq; + struct delayed_work osb_truncate_log_wq; struct ocfs2_node_map osb_recovering_orphan_dirs; unsigned int *osb_orphan_wipes; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 9d91e66f51a9..000d71cca6c5 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -49,7 +49,7 @@ static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); -static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, +static int ocfs2_block_group_fill(handle_t *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, u64 group_blkno, @@ -59,9 +59,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, struct inode *alloc_inode, struct buffer_head *bh); -static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, - struct ocfs2_alloc_context *ac); - static int ocfs2_cluster_group_search(struct inode *inode, struct buffer_head *group_bh, u32 bits_wanted, u32 min_bits, @@ -72,6 +69,7 @@ static int ocfs2_block_group_search(struct inode *inode, u16 *bit_off, u16 *bits_found); static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, struct ocfs2_alloc_context *ac, + handle_t *handle, u32 bits_wanted, u32 min_bits, u16 *bit_off, @@ -79,20 +77,20 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super 
*osb, u64 *bg_blkno); static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, int nr); -static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, +static inline int ocfs2_block_group_set_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, unsigned int bit_off, unsigned int num_bits); -static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle, +static inline int ocfs2_block_group_clear_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, unsigned int bit_off, unsigned int num_bits); -static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle, +static int ocfs2_relink_block_group(handle_t *handle, struct inode *alloc_inode, struct buffer_head *fe_bh, struct buffer_head *bg_bh, @@ -100,7 +98,7 @@ static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle, u16 chain); static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg, u32 wanted); -static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, +static int ocfs2_free_suballoc_bits(handle_t *handle, struct inode *alloc_inode, struct buffer_head *alloc_bh, unsigned int start_bit, @@ -120,8 +118,16 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode, void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) { - if (ac->ac_inode) - iput(ac->ac_inode); + struct inode *inode = ac->ac_inode; + + if (inode) { + if (ac->ac_which != OCFS2_AC_USE_LOCAL) + ocfs2_meta_unlock(inode, 1); + + mutex_unlock(&inode->i_mutex); + + iput(inode); + } if (ac->ac_bh) brelse(ac->ac_bh); kfree(ac); @@ -190,7 +196,7 @@ static int ocfs2_check_group_descriptor(struct super_block *sb, return 0; } -static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, +static int ocfs2_block_group_fill(handle_t *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, u64 group_blkno, @@ -273,7 +279,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; struct ocfs2_chain_list *cl; struct ocfs2_alloc_context *ac = NULL; - struct ocfs2_journal_handle *handle = NULL; + handle_t *handle = NULL; u32 bit_off, num_bits; u16 alloc_rec; u64 bg_blkno; @@ -284,16 +290,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, mlog_entry_void(); - handle = ocfs2_alloc_handle(osb); - if (!handle) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - cl = &fe->id2.i_chain; status = ocfs2_reserve_clusters(osb, - handle, le16_to_cpu(cl->cl_cpg), &ac); if (status < 0) { @@ -304,7 +302,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, credits = ocfs2_calc_group_alloc_credits(osb->sb, le16_to_cpu(cl->cl_cpg)); - handle = ocfs2_start_trans(osb, handle, credits); + handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; @@ -389,7 +387,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, status = 0; bail: if (handle) - ocfs2_commit_trans(handle); + ocfs2_commit_trans(osb, handle); if (ac) ocfs2_free_alloc_context(ac); @@ -402,27 +400,38 @@ bail: } static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, - struct ocfs2_alloc_context *ac) + struct ocfs2_alloc_context *ac, + int type, + u32 slot) { int status; u32 bits_wanted = ac->ac_bits_wanted; - struct inode *alloc_inode = ac->ac_inode; + struct inode *alloc_inode; struct buffer_head *bh = NULL; - struct 
ocfs2_journal_handle *handle = ac->ac_handle; struct ocfs2_dinode *fe; u32 free_bits; mlog_entry_void(); - BUG_ON(handle->flags & OCFS2_HANDLE_STARTED); + alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); + if (!alloc_inode) { + mlog_errno(-EINVAL); + return -EINVAL; + } - ocfs2_handle_add_inode(handle, alloc_inode); - status = ocfs2_meta_lock(alloc_inode, handle, &bh, 1); + mutex_lock(&alloc_inode->i_mutex); + + status = ocfs2_meta_lock(alloc_inode, &bh, 1); if (status < 0) { + mutex_unlock(&alloc_inode->i_mutex); + iput(alloc_inode); + mlog_errno(status); - goto bail; + return status; } + ac->ac_inode = alloc_inode; + fe = (struct ocfs2_dinode *) bh->b_data; if (!OCFS2_IS_VALID_DINODE(fe)) { OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); @@ -473,12 +482,11 @@ bail: } int ocfs2_reserve_new_metadata(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_dinode *fe, struct ocfs2_alloc_context **ac) { int status; - struct inode *alloc_inode = NULL; + u32 slot; *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); if (!(*ac)) { @@ -488,28 +496,18 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb, } (*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe); - (*ac)->ac_handle = handle; (*ac)->ac_which = OCFS2_AC_USE_META; #ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS - alloc_inode = ocfs2_get_system_file_inode(osb, - EXTENT_ALLOC_SYSTEM_INODE, - 0); + slot = 0; #else - alloc_inode = ocfs2_get_system_file_inode(osb, - EXTENT_ALLOC_SYSTEM_INODE, - osb->slot_num); + slot = osb->slot_num; #endif - if (!alloc_inode) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - (*ac)->ac_inode = igrab(alloc_inode); (*ac)->ac_group_search = ocfs2_block_group_search; - status = ocfs2_reserve_suballoc_bits(osb, (*ac)); + status = ocfs2_reserve_suballoc_bits(osb, (*ac), + EXTENT_ALLOC_SYSTEM_INODE, slot); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -523,19 +521,14 @@ bail: *ac = NULL; } - if (alloc_inode) - iput(alloc_inode); - mlog_exit(status); return status; } int ocfs2_reserve_new_inode(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_alloc_context **ac) { int status; - struct inode *alloc_inode = NULL; *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); if (!(*ac)) { @@ -545,22 +538,13 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb, } (*ac)->ac_bits_wanted = 1; - (*ac)->ac_handle = handle; (*ac)->ac_which = OCFS2_AC_USE_INODE; - alloc_inode = ocfs2_get_system_file_inode(osb, - INODE_ALLOC_SYSTEM_INODE, - osb->slot_num); - if (!alloc_inode) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - - (*ac)->ac_inode = igrab(alloc_inode); (*ac)->ac_group_search = ocfs2_block_group_search; - status = ocfs2_reserve_suballoc_bits(osb, *ac); + status = ocfs2_reserve_suballoc_bits(osb, *ac, + INODE_ALLOC_SYSTEM_INODE, + osb->slot_num); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -574,9 +558,6 @@ bail: *ac = NULL; } - if (alloc_inode) - iput(alloc_inode); - mlog_exit(status); return status; } @@ -588,20 +569,17 @@ int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb, { int status; - ac->ac_inode = ocfs2_get_system_file_inode(osb, - GLOBAL_BITMAP_SYSTEM_INODE, - OCFS2_INVALID_SLOT); - if (!ac->ac_inode) { - status = -EINVAL; - mlog(ML_ERROR, "Could not get bitmap inode!\n"); - goto bail; - } ac->ac_which = OCFS2_AC_USE_MAIN; ac->ac_group_search = ocfs2_cluster_group_search; - status = ocfs2_reserve_suballoc_bits(osb, ac); - if (status < 0 && status != 
-ENOSPC) + status = ocfs2_reserve_suballoc_bits(osb, ac, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (status < 0 && status != -ENOSPC) { mlog_errno(status); + goto bail; + } + bail: return status; } @@ -610,7 +588,6 @@ bail: * use so we figure it out for them, but unfortunately this clutters * things a bit. */ int ocfs2_reserve_clusters(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, u32 bits_wanted, struct ocfs2_alloc_context **ac) { @@ -618,8 +595,6 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb, mlog_entry_void(); - BUG_ON(!handle); - *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL); if (!(*ac)) { status = -ENOMEM; @@ -628,12 +603,10 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb, } (*ac)->ac_bits_wanted = bits_wanted; - (*ac)->ac_handle = handle; status = -ENOSPC; if (ocfs2_alloc_should_use_local(osb, bits_wanted)) { status = ocfs2_reserve_local_alloc_bits(osb, - handle, bits_wanted, *ac); if ((status < 0) && (status != -ENOSPC)) { @@ -774,7 +747,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, return status; } -static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, +static inline int ocfs2_block_group_set_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, @@ -845,7 +818,7 @@ static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl) return best; } -static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle, +static int ocfs2_relink_block_group(handle_t *handle, struct inode *alloc_inode, struct buffer_head *fe_bh, struct buffer_head *bg_bh, @@ -1025,7 +998,7 @@ static int ocfs2_block_group_search(struct inode *inode, } static int ocfs2_alloc_dinode_update_counts(struct inode *inode, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct buffer_head *di_bh, u32 num_bits, u16 chain) @@ -1055,6 +1028,7 @@ out: } static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, + handle_t *handle, u32 bits_wanted, u32 min_bits, u16 *bit_off, @@ -1067,7 +1041,6 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, struct buffer_head *group_bh = NULL; struct ocfs2_group_desc *gd; struct inode *alloc_inode = ac->ac_inode; - struct ocfs2_journal_handle *handle = ac->ac_handle; ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno, &group_bh, OCFS2_BH_CACHED, alloc_inode); @@ -1115,6 +1088,7 @@ out: } static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, + handle_t *handle, u32 bits_wanted, u32 min_bits, u16 *bit_off, @@ -1126,7 +1100,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, u16 chain, tmp_bits; u32 tmp_used; u64 next_group; - struct ocfs2_journal_handle *handle = ac->ac_handle; struct inode *alloc_inode = ac->ac_inode; struct buffer_head *group_bh = NULL; struct buffer_head *prev_group_bh = NULL; @@ -1272,6 +1245,7 @@ bail: /* will give out up to bits_wanted contiguous bits. */ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, struct ocfs2_alloc_context *ac, + handle_t *handle, u32 bits_wanted, u32 min_bits, u16 *bit_off, @@ -1313,8 +1287,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, * by jumping straight to the most recently used * allocation group. This helps us mantain some * contiguousness across allocations. 
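Across these suballoc.c hunks, allocation splits into a reserve step that takes the allocator's locks itself (and no longer receives a handle) and a claim step that runs inside an explicit handle_t. A minimal sketch of the resulting caller shape; the function name is hypothetical and OCFS2_WINDOW_MOVE_CREDITS stands in for a suitably sized credit count.

static int example_claim_one_cluster(struct ocfs2_super *osb)
{
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle;
	u32 cluster_off, num_clusters;
	int status;

	/* Reserve first: this now locks the allocator inode for us. */
	status = ocfs2_reserve_clusters(osb, 1, &ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		return status;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_free_ac;
	}

	/* Claim inside the transaction; the handle is passed explicitly. */
	status = ocfs2_claim_clusters(osb, handle, ac, 1,
				      &cluster_off, &num_clusters);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);
out_free_ac:
	/* Freeing the context releases whatever allocator locks the
	 * reserve step took. */
	ocfs2_free_alloc_context(ac);
	return status;
}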
*/ - status = ocfs2_search_one_group(ac, bits_wanted, min_bits, - bit_off, num_bits, + status = ocfs2_search_one_group(ac, handle, bits_wanted, + min_bits, bit_off, num_bits, hint_blkno, &bits_left); if (!status) { /* Be careful to update *bg_blkno here as the @@ -1336,7 +1310,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, ac->ac_chain = victim; ac->ac_allow_chain_relink = 1; - status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, + status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off, num_bits, bg_blkno, &bits_left); if (!status) goto set_hint; @@ -1360,7 +1334,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, continue; ac->ac_chain = i; - status = ocfs2_search_chain(ac, bits_wanted, min_bits, + status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off, num_bits, bg_blkno, &bits_left); if (!status) @@ -1388,7 +1362,7 @@ bail: } int ocfs2_claim_metadata(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 bits_wanted, u16 *suballoc_bit_start, @@ -1401,10 +1375,10 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb, BUG_ON(!ac); BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted)); BUG_ON(ac->ac_which != OCFS2_AC_USE_META); - BUG_ON(ac->ac_handle != handle); status = ocfs2_claim_suballoc_bits(osb, ac, + handle, bits_wanted, 1, suballoc_bit_start, @@ -1425,7 +1399,7 @@ bail: } int ocfs2_claim_new_inode(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u16 *suballoc_bit, u64 *fe_blkno) @@ -1440,10 +1414,10 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb, BUG_ON(ac->ac_bits_given != 0); BUG_ON(ac->ac_bits_wanted != 1); BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE); - BUG_ON(ac->ac_handle != handle); status = ocfs2_claim_suballoc_bits(osb, ac, + handle, 1, 1, suballoc_bit, @@ -1528,7 +1502,7 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode, * of any size. */ int ocfs2_claim_clusters(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 min_clusters, u32 *cluster_start, @@ -1546,7 +1520,6 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL && ac->ac_which != OCFS2_AC_USE_MAIN); - BUG_ON(ac->ac_handle != handle); if (ac->ac_which == OCFS2_AC_USE_LOCAL) { status = ocfs2_claim_local_alloc_bits(osb, @@ -1572,6 +1545,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, status = ocfs2_claim_suballoc_bits(osb, ac, + handle, bits_wanted, min_clusters, &bg_bit_off, @@ -1598,7 +1572,7 @@ bail: return status; } -static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle, +static inline int ocfs2_block_group_clear_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, @@ -1653,7 +1627,7 @@ bail: /* * expects the suballoc inode to already be locked. 
*/ -static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, +static int ocfs2_free_suballoc_bits(handle_t *handle, struct inode *alloc_inode, struct buffer_head *alloc_bh, unsigned int start_bit, @@ -1737,7 +1711,7 @@ static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit) return group; } -int ocfs2_free_dinode(struct ocfs2_journal_handle *handle, +int ocfs2_free_dinode(handle_t *handle, struct inode *inode_alloc_inode, struct buffer_head *inode_alloc_bh, struct ocfs2_dinode *di) @@ -1750,7 +1724,7 @@ int ocfs2_free_dinode(struct ocfs2_journal_handle *handle, inode_alloc_bh, bit, bg_blkno, 1); } -int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle, +int ocfs2_free_extent_block(handle_t *handle, struct inode *eb_alloc_inode, struct buffer_head *eb_alloc_bh, struct ocfs2_extent_block *eb) @@ -1763,7 +1737,7 @@ int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle, bit, bg_blkno, 1); } -int ocfs2_free_clusters(struct ocfs2_journal_handle *handle, +int ocfs2_free_clusters(handle_t *handle, struct inode *bitmap_inode, struct buffer_head *bitmap_bh, u64 start_blk, diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index c787838d1052..1a3c94cb9250 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -43,7 +43,6 @@ struct ocfs2_alloc_context { #define OCFS2_AC_USE_INODE 3 #define OCFS2_AC_USE_META 4 u32 ac_which; - struct ocfs2_journal_handle *ac_handle; /* these are used by the chain search */ u16 ac_chain; @@ -60,45 +59,42 @@ static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac) } int ocfs2_reserve_new_metadata(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_dinode *fe, struct ocfs2_alloc_context **ac); int ocfs2_reserve_new_inode(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, struct ocfs2_alloc_context **ac); int ocfs2_reserve_clusters(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, u32 bits_wanted, struct ocfs2_alloc_context **ac); int ocfs2_claim_metadata(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 bits_wanted, u16 *suballoc_bit_start, u32 *num_bits, u64 *blkno_start); int ocfs2_claim_new_inode(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u16 *suballoc_bit, u64 *fe_blkno); int ocfs2_claim_clusters(struct ocfs2_super *osb, - struct ocfs2_journal_handle *handle, + handle_t *handle, struct ocfs2_alloc_context *ac, u32 min_clusters, u32 *cluster_start, u32 *num_clusters); -int ocfs2_free_dinode(struct ocfs2_journal_handle *handle, +int ocfs2_free_dinode(handle_t *handle, struct inode *inode_alloc_inode, struct buffer_head *inode_alloc_bh, struct ocfs2_dinode *di); -int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle, +int ocfs2_free_extent_block(handle_t *handle, struct inode *eb_alloc_inode, struct buffer_head *eb_alloc_bh, struct ocfs2_extent_block *eb); -int ocfs2_free_clusters(struct ocfs2_journal_handle *handle, +int ocfs2_free_clusters(handle_t *handle, struct inode *bitmap_inode, struct buffer_head *bitmap_bh, u64 start_blk, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 76b46ebbb10c..4bf39540e652 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -68,9 +68,7 @@ #include "buffer_head_io.h" -static kmem_cache_t *ocfs2_inode_cachep = NULL; - -kmem_cache_t *ocfs2_lock_cache = NULL; +static struct kmem_cache *ocfs2_inode_cachep = NULL; /* OCFS2 needs to schedule several 
differnt types of work which * require cluster locking, disk I/O, recovery waits, etc. Since these @@ -141,6 +139,7 @@ enum { Opt_hb_local, Opt_data_ordered, Opt_data_writeback, + Opt_atime_quantum, Opt_err, }; @@ -154,6 +153,7 @@ static match_table_t tokens = { {Opt_hb_local, OCFS2_HB_LOCAL}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, + {Opt_atime_quantum, "atime_quantum=%u"}, {Opt_err, NULL} }; @@ -303,7 +303,7 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb) { struct ocfs2_inode_info *oi; - oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS); + oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS); if (!oi) return NULL; @@ -707,6 +707,7 @@ static int ocfs2_parse_options(struct super_block *sb, while ((p = strsep(&options, ",")) != NULL) { int token, option; substring_t args[MAX_OPT_ARGS]; + struct ocfs2_super * osb = OCFS2_SB(sb); if (!*p) continue; @@ -747,6 +748,16 @@ static int ocfs2_parse_options(struct super_block *sb, case Opt_data_writeback: *mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; break; + case Opt_atime_quantum: + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option >= 0) + osb->s_atime_quantum = option; + else + osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + break; default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " @@ -867,7 +878,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) goto bail; } - status = ocfs2_meta_lock(inode, NULL, &bh, 0); + status = ocfs2_meta_lock(inode, &bh, 0); if (status < 0) { mlog_errno(status); goto bail; @@ -903,7 +914,7 @@ bail: } static void ocfs2_inode_init_once(void *data, - kmem_cache_t *cachep, + struct kmem_cache *cachep, unsigned long flags) { struct ocfs2_inode_info *oi = data; @@ -914,9 +925,7 @@ static void ocfs2_inode_init_once(void *data, oi->ip_open_count = 0; spin_lock_init(&oi->ip_lock); ocfs2_extent_map_init(&oi->vfs_inode); - INIT_LIST_HEAD(&oi->ip_handle_list); INIT_LIST_HEAD(&oi->ip_io_markers); - oi->ip_handle = NULL; oi->ip_created_trans = 0; oi->ip_last_trans = 0; oi->ip_dir_start_lookup = 0; @@ -948,14 +957,6 @@ static int ocfs2_initialize_mem_caches(void) if (!ocfs2_inode_cachep) return -ENOMEM; - ocfs2_lock_cache = kmem_cache_create("ocfs2_lock", - sizeof(struct ocfs2_journal_lock), - 0, - SLAB_HWCACHE_ALIGN, - NULL, NULL); - if (!ocfs2_lock_cache) - return -ENOMEM; - return 0; } @@ -963,11 +964,8 @@ static void ocfs2_free_mem_caches(void) { if (ocfs2_inode_cachep) kmem_cache_destroy(ocfs2_inode_cachep); - if (ocfs2_lock_cache) - kmem_cache_destroy(ocfs2_lock_cache); ocfs2_inode_cachep = NULL; - ocfs2_lock_cache = NULL; } static int ocfs2_get_sector(struct super_block *sb, @@ -1280,6 +1278,8 @@ static int ocfs2_initialize_super(struct super_block *sb, init_waitqueue_head(&osb->checkpoint_event); atomic_set(&osb->needs_checkpoint, 0); + osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + osb->node_num = O2NM_INVALID_NODE_NUM; osb->slot_num = OCFS2_INVALID_SLOT; @@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb, spin_lock_init(&journal->j_lock); journal->j_trans_id = (unsigned long) 1; INIT_LIST_HEAD(&journal->j_la_cleanups); - INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); + INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); journal->j_state = OCFS2_JOURNAL_FREE; /* get some pseudo constants for clustersize bits */ @@ -1674,7 +1674,7 @@ void __ocfs2_error(struct super_block *sb, va_list args; va_start(args, fmt); - vsprintf(error_buf, fmt, args); + 
vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); /* Not using mlog here because we want to show the actual @@ -1695,7 +1695,7 @@ void __ocfs2_abort(struct super_block* sb, va_list args; va_start(args, fmt); - vsprintf(error_buf, fmt, args); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index c0f68aa6c175..957d6878b03e 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -126,6 +126,10 @@ static int ocfs2_readlink(struct dentry *dentry, goto out; } + /* + * Without vfsmount we can't update atime now, + * but we will update atime here ultimately. + */ ret = vfs_readlink(dentry, buffer, buflen, link); brelse(bh); diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 9707ed7a3206..39814b900fc0 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -69,7 +69,7 @@ struct ocfs2_meta_cache_item { sector_t c_block; }; -static kmem_cache_t *ocfs2_uptodate_cachep = NULL; +static struct kmem_cache *ocfs2_uptodate_cachep = NULL; void ocfs2_metadata_cache_init(struct inode *inode) { diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index 592a6402e851..26f44e0074ec 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -330,13 +330,13 @@ out: return 0; } -static kmem_cache_t *op_inode_cachep; +static struct kmem_cache *op_inode_cachep; static struct inode *openprom_alloc_inode(struct super_block *sb) { struct op_inode_info *oi; - oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL); + oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL); if (!oi) return NULL; @@ -415,7 +415,7 @@ static struct file_system_type openprom_fs_type = { .kill_sb = kill_anon_super, }; -static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags) +static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags) { struct op_inode_info *oi = (struct op_inode_info *) data; diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c index 3068528890a6..9917a8c360f2 100644 --- a/fs/partitions/amiga.c +++ b/fs/partitions/amiga.c @@ -43,6 +43,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev) if (warn_no_part) printk("Dev %s: unable to read RDB block %d\n", bdevname(bdev, b), blk); + res = -1; goto rdb_done; } if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK)) @@ -79,6 +80,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev) if (warn_no_part) printk("Dev %s: unable to read partition block %d\n", bdevname(bdev, b), blk); + res = -1; goto rdb_done; } pb = (struct PartitionBlock *)data; diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c index 192a6adfdefd..1f3572d5b755 100644 --- a/fs/partitions/atari.c +++ b/fs/partitions/atari.c @@ -88,7 +88,7 @@ int atari_partition(struct parsed_partitions *state, struct block_device *bdev) if (!xrs) { printk (" block %ld read failed\n", partsect); put_dev_sector(sect); - return 0; + return -1; } /* ++roman: sanity check: bit 0 of flg field must be set */ diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 6fb4b6150d77..1901137f4eca 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -153,7 +153,7 @@ static struct parsed_partitions * check_partition(struct gendisk *hd, struct block_device *bdev) { struct parsed_partitions *state; - int i, res; + int i, res, err; state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL); if (!state) @@ -165,19 +165,30 @@ 
check_partition(struct gendisk *hd, struct block_device *bdev) sprintf(state->name, "p"); state->limit = hd->minors; - i = res = 0; + i = res = err = 0; while (!res && check_part[i]) { memset(&state->parts, 0, sizeof(state->parts)); res = check_part[i++](state, bdev); + if (res < 0) { + /* We have hit an I/O error which we don't report now. + * But record it, and let the others do their job. + */ + err = res; + res = 0; + } + } if (res > 0) return state; + if (!err) + /* The partition is unrecognized. So report I/O errors if there were any */ + res = err; if (!res) printk(" unknown partition table\n"); else if (warn_no_part) printk(" unable to read partition table\n"); kfree(state); - return NULL; + return ERR_PTR(res); } /* @@ -494,6 +505,8 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev) disk->fops->revalidate_disk(disk); if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) return 0; + if (IS_ERR(state)) /* I/O error reading the partition table */ + return PTR_ERR(state); for (p = 1; p < state->limit; p++) { sector_t size = state->parts[p].size; sector_t from = state->parts[p].from; diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c index d352a7381fed..9f7ad4244f63 100644 --- a/fs/partitions/ibm.c +++ b/fs/partitions/ibm.c @@ -43,7 +43,7 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) { int ibm_partition(struct parsed_partitions *state, struct block_device *bdev) { - int blocksize, offset, size; + int blocksize, offset, size,res; loff_t i_size; dasd_information_t *info; struct hd_geometry *geo; @@ -56,15 +56,16 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) unsigned char *data; Sector sect; + res = 0; blocksize = bdev_hardsect_size(bdev); if (blocksize <= 0) - return 0; + goto out_exit; i_size = i_size_read(bdev->bd_inode); if (i_size == 0) - return 0; + goto out_exit; if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL) - goto out_noinfo; + goto out_exit; if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) goto out_nogeo; if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL) @@ -72,7 +73,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) - goto out_noioctl; + goto out_freeall; /* * Get volume label, extract name and type. @@ -92,6 +93,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) EBCASC(type, 4); EBCASC(name, 6); + res = 1; + /* * Three different types: CMS1, VOL1 and LNX1/unlabeled */ @@ -156,6 +159,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) counter++; blk++; } + if (!data) + /* Are we not supposed to report this ? 
*/ + goto out_readerr; } else { /* * Old style LNX1 or unlabeled disk @@ -171,18 +177,17 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) } printk("\n"); - kfree(label); - kfree(geo); - kfree(info); - return 1; + goto out_freeall; + out_readerr: -out_noioctl: + res = -1; +out_freeall: kfree(label); out_nolab: kfree(geo); out_nogeo: kfree(info); -out_noinfo: - return 0; +out_exit: + return res; } diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index c0871002d00d..d4a0fad3563b 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -74,6 +74,8 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev) be32_to_cpu(part->start_block) * (secsize/512), be32_to_cpu(part->block_count) * (secsize/512)); + if (!strnicmp(part->type, "Linux_RAID", 10)) + state->parts[slot].flags = 1; #ifdef CONFIG_PPC_PMAC /* * If this is the first bootable partition, tell the diff --git a/fs/pipe.c b/fs/pipe.c index b1626f269a34..ae36b89b1a37 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -830,7 +830,14 @@ void free_pipe_info(struct inode *inode) static struct vfsmount *pipe_mnt __read_mostly; static int pipefs_delete_dentry(struct dentry *dentry) { - return 1; + /* + * At creation time, we pretended this dentry was hashed + * (by clearing DCACHE_UNHASHED bit in d_flags) + * At delete time, we restore the truth : not hashed. + * (so that dput() can proceed correctly) + */ + dentry->d_flags |= DCACHE_UNHASHED; + return 0; } static struct dentry_operations pipefs_dentry_operations = { @@ -891,17 +898,22 @@ struct file *create_write_pipe(void) if (!inode) goto err_file; - sprintf(name, "[%lu]", inode->i_ino); + this.len = sprintf(name, "[%lu]", inode->i_ino); this.name = name; - this.len = strlen(name); - this.hash = inode->i_ino; /* will go */ + this.hash = 0; err = -ENOMEM; dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this); if (!dentry) goto err_inode; dentry->d_op = &pipefs_dentry_operations; - d_add(dentry, inode); + /* + * We dont want to publish this dentry into global dentry hash table. 
+ * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED + * This permits a working /proc/$pid/fd/XXX on pipes + */ + dentry->d_flags &= ~DCACHE_UNHASHED; + d_instantiate(dentry, inode); f->f_vfsmnt = mntget(pipe_mnt); f->f_dentry = dentry; f->f_mapping = inode->i_mapping; diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 7431d7ba2d09..f6c776272572 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -8,8 +8,9 @@ proc-y := nommu.o task_nommu.o proc-$(CONFIG_MMU) := mmu.o task_mmu.o proc-y += inode.o root.o base.o generic.o array.o \ - kmsg.o proc_tty.o proc_misc.o + proc_tty.o proc_misc.o proc-$(CONFIG_PROC_KCORE) += kcore.o proc-$(CONFIG_PROC_VMCORE) += vmcore.o proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o +proc-$(CONFIG_PRINTK) += kmsg.o diff --git a/fs/proc/base.c b/fs/proc/base.c index 8df27401d292..b859fc749c07 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -442,7 +442,8 @@ static int mountstats_open(struct inode *inode, struct file *file) if (task) { task_lock(task); - namespace = task->nsproxy->namespace; + if (task->nsproxy) + namespace = task->nsproxy->namespace; if (namespace) get_namespace(namespace); task_unlock(task); @@ -682,8 +683,6 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, char buffer[PROC_NUMBUF], *end; int oom_adjust; - if (!capable(CAP_SYS_RESOURCE)) - return -EPERM; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; @@ -698,6 +697,10 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, task = get_proc_task(file->f_dentry->d_inode); if (!task) return -ESRCH; + if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { + put_task_struct(task); + return -EACCES; + } task->oomkilladj = oom_adjust; put_task_struct(task); if (end - buffer == 0) @@ -1882,8 +1885,9 @@ out: return; } -struct dentry *proc_pid_instantiate(struct inode *dir, - struct dentry * dentry, struct task_struct *task, void *ptr) +static struct dentry *proc_pid_instantiate(struct inode *dir, + struct dentry * dentry, + struct task_struct *task, void *ptr) { struct dentry *error = ERR_PTR(-ENOENT); struct inode *inode; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 49dfb2ab783e..e26945ba685b 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -81,14 +81,14 @@ static void proc_read_inode(struct inode * inode) inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; } -static kmem_cache_t * proc_inode_cachep; +static struct kmem_cache * proc_inode_cachep; static struct inode *proc_alloc_inode(struct super_block *sb) { struct proc_inode *ei; struct inode *inode; - ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); + ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->pid = NULL; @@ -105,7 +105,7 @@ static void proc_destroy_inode(struct inode *inode) kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct proc_inode *ei = (struct proc_inode *) foo; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 1294eda4acae..1be73082edd3 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -22,6 +22,7 @@ #include <asm/uaccess.h> #include <asm/io.h> +#define CORE_STR "CORE" static int open_kcore(struct inode * inode, struct file * filp) { @@ -82,10 +83,11 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen) } 
*elf_buflen = sizeof(struct elfhdr) + (*nphdr + 2)*sizeof(struct elf_phdr) + - 3 * (sizeof(struct elf_note) + 4) + - sizeof(struct elf_prstatus) + - sizeof(struct elf_prpsinfo) + - sizeof(struct task_struct); + 3 * ((sizeof(struct elf_note)) + + roundup(sizeof(CORE_STR), 4)) + + roundup(sizeof(struct elf_prstatus), 4) + + roundup(sizeof(struct elf_prpsinfo), 4) + + roundup(sizeof(struct task_struct), 4); *elf_buflen = PAGE_ALIGN(*elf_buflen); return size + *elf_buflen; } @@ -210,7 +212,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) nhdr->p_offset = offset; /* set up the process status */ - notes[0].name = "CORE"; + notes[0].name = CORE_STR; notes[0].type = NT_PRSTATUS; notes[0].datasz = sizeof(struct elf_prstatus); notes[0].data = &prstatus; @@ -221,7 +223,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) bufp = storenote(&notes[0], bufp); /* set up the process info */ - notes[1].name = "CORE"; + notes[1].name = CORE_STR; notes[1].type = NT_PRPSINFO; notes[1].datasz = sizeof(struct elf_prpsinfo); notes[1].data = &prpsinfo; @@ -238,7 +240,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff) bufp = storenote(&notes[1], bufp); /* set up the task structure */ - notes[2].name = "CORE"; + notes[2].name = CORE_STR; notes[2].type = NT_TASKSTRUCT; notes[2].datasz = sizeof(struct task_struct); notes[2].data = current; diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 93c43b676e59..51815cece6f3 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -696,9 +696,11 @@ void __init proc_misc_init(void) proc_symlink("mounts", NULL, "self/mounts"); /* And now for trickier ones */ +#ifdef CONFIG_PRINTK entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); if (entry) entry->proc_fops = &proc_kmsg_operations; +#endif create_seq_entry("devices", 0, &proc_devinfo_operations); create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); #ifdef CONFIG_BLOCK diff --git a/fs/proc/root.c b/fs/proc/root.c index ffe66c38488b..64d242b6dcfa 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -13,6 +13,7 @@ #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> +#include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/smp_lock.h> diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 5a41db2a218d..c047dc654d5c 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -515,12 +515,12 @@ static void qnx4_read_inode(struct inode *inode) brelse(bh); } -static kmem_cache_t *qnx4_inode_cachep; +static struct kmem_cache *qnx4_inode_cachep; static struct inode *qnx4_alloc_inode(struct super_block *sb) { struct qnx4_inode_info *ei; - ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL); + ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -531,7 +531,7 @@ static void qnx4_destroy_inode(struct inode *inode) kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); } -static void init_once(void *foo, kmem_cache_t * cachep, +static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index b67ce9354048..373d862c3f87 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -74,7 +74,8 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp) igrab(inode); reiserfs_warning(inode->i_sb, "pinning inode %lu because the " - "preallocation can't be freed"); + "preallocation can't be freed", + inode->i_ino); goto
out; } } @@ -316,12 +317,11 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl /* area filled with zeroes, to supply as list of zero blocknumbers We allocate it outside of loop just in case loop would spin for several iterations. */ - char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. + char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. if (!zeros) { res = -ENOMEM; goto error_exit_free_blocks; } - memset(zeros, 0, to_paste * UNFM_P_SIZE); do { to_paste = min_t(__u64, hole_size, @@ -406,6 +406,8 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl we restart it. This will also free the path. */ if (journal_transaction_should_end (th, th->t_blocks_allocated)) { + inode->i_size = cpu_key_k_offset(&key) + + (to_paste << inode->i_blkbits); res = restart_transaction(th, inode, &path); @@ -1044,6 +1046,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0); memset(kaddr, 0, from); kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(prepared_pages[0]); } if (to != PAGE_CACHE_SIZE) { /* Last page needs to be partially zeroed */ char *kaddr = @@ -1051,6 +1054,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode KM_USER0); memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(prepared_pages[num_pages - 1]); } /* Since all blocks are new - use already calculated value */ @@ -1184,6 +1188,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode memset(kaddr + block_start, 0, from - block_start); kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(prepared_pages[0]); set_buffer_uptodate(bh); } } @@ -1221,6 +1226,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode KM_USER0); memset(kaddr + to, 0, block_end - to); kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(prepared_pages[num_pages - 1]); set_buffer_uptodate(bh); } } @@ -1306,56 +1312,8 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t count = MAX_NON_LFS - (unsigned long)*ppos; } - if (file->f_flags & O_DIRECT) { // Direct IO needs treatment - ssize_t result, after_file_end = 0; - if ((*ppos + count >= inode->i_size) - || (file->f_flags & O_APPEND)) { - /* If we are appending a file, we need to put this savelink in here. - If we will crash while doing direct io, finish_unfinished will - cut the garbage from the file end. 
*/ - reiserfs_write_lock(inode->i_sb); - err = - journal_begin(&th, inode->i_sb, - JOURNAL_PER_BALANCE_CNT); - if (err) { - reiserfs_write_unlock(inode->i_sb); - return err; - } - reiserfs_update_inode_transaction(inode); - add_save_link(&th, inode, 1 /* Truncate */ ); - after_file_end = 1; - err = - journal_end(&th, inode->i_sb, - JOURNAL_PER_BALANCE_CNT); - reiserfs_write_unlock(inode->i_sb); - if (err) - return err; - } - result = do_sync_write(file, buf, count, ppos); - - if (after_file_end) { /* Now update i_size and remove the savelink */ - struct reiserfs_transaction_handle th; - reiserfs_write_lock(inode->i_sb); - err = journal_begin(&th, inode->i_sb, 1); - if (err) { - reiserfs_write_unlock(inode->i_sb); - return err; - } - reiserfs_update_inode_transaction(inode); - mark_inode_dirty(inode); - err = journal_end(&th, inode->i_sb, 1); - if (err) { - reiserfs_write_unlock(inode->i_sb); - return err; - } - err = remove_save_link(inode, 1 /* truncate */ ); - reiserfs_write_unlock(inode->i_sb); - if (err) - return err; - } - - return result; - } + if (file->f_flags & O_DIRECT) + return do_sync_write(file, buf, count, ppos); if (unlikely((ssize_t) count < 0)) return -EINVAL; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9c69bcacad22..254239e6f9e3 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -216,11 +216,12 @@ static int file_capable(struct inode *inode, long block) BUG_ON(!th->t_trans_id); BUG_ON(!th->t_refcount); + pathrelse(path); + /* we cannot restart while nested */ if (th->t_refcount > 1) { return 0; } - pathrelse(path); reiserfs_update_sd(th, inode); err = journal_end(th, s, len); if (!err) { @@ -928,15 +929,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block, if (blocks_needed == 1) { un = &unf_single; } else { - un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. + un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. if (!un) { un = &unf_single; blocks_needed = 1; max_to_insert = 0; - } else - memset(un, 0, - UNFM_P_SIZE * min(blocks_needed, - max_to_insert)); + } } if (blocks_needed <= max_to_insert) { /* we are going to add target block to the file. 
Use allocated diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 85ce23268302..7280a23ef344 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); -static void flush_async_commits(void *p); +static void flush_async_commits(struct work_struct *work); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ @@ -1464,7 +1464,7 @@ static int flush_journal_list(struct super_block *s, } /* if someone has this block in a newer transaction, just make - ** sure they are commited, and don't try writing it to disk + ** sure they are committed, and don't try writing it to disk */ if (pjl) { if (atomic_read(&pjl->j_commit_left)) @@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, if (reiserfs_mounted_fs_count <= 1) commit_wq = create_workqueue("reiserfs"); - INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); + INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); + journal->j_work_sb = p_s_sb; return 0; free_and_return: free_journal_ram(p_s_sb); @@ -3384,7 +3385,7 @@ static int remove_from_transaction(struct super_block *p_s_sb, /* ** for any cnode in a journal list, it can only be dirtied of all the -** transactions that include it are commited to disk. +** transactions that include it are committed to disk. ** this checks through each transaction, and returns 1 if you are allowed to dirty, ** and 0 if you aren't ** @@ -3426,7 +3427,7 @@ static int can_dirty(struct reiserfs_journal_cnode *cn) } /* syncs the commit blocks, but does not force the real buffers to disk -** will wait until the current transaction is done/commited before returning +** will wait until the current transaction is done/committed before returning */ int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) @@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th, /* ** writeback the pending async commits to disk */ -static void flush_async_commits(void *p) +static void flush_async_commits(struct work_struct *work) { - struct super_block *p_s_sb = p; - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); + struct reiserfs_journal *journal = + container_of(work, struct reiserfs_journal, j_work.work); + struct super_block *p_s_sb = journal->j_work_sb; struct reiserfs_journal_list *jl; struct list_head *entry; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 17249994110f..7fb5fb036f90 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -490,13 +490,13 @@ static void reiserfs_put_super(struct super_block *s) return; } -static kmem_cache_t *reiserfs_inode_cachep; +static struct kmem_cache *reiserfs_inode_cachep; static struct inode *reiserfs_alloc_inode(struct super_block *sb) { struct reiserfs_inode_info *ei; ei = (struct reiserfs_inode_info *) - kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL); + kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -507,7 +507,7 @@ static void reiserfs_destroy_inode(struct inode *inode) kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); } -static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) { struct reiserfs_inode_info 
*ei = (struct reiserfs_inode_info *)foo; @@ -1549,13 +1549,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) struct reiserfs_sb_info *sbi; int errval = -EINVAL; - sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); + sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); if (!sbi) { errval = -ENOMEM; goto error; } s->s_fs_info = sbi; - memset(sbi, 0, sizeof(struct reiserfs_sb_info)); /* Set default values for options: non-aggressive tails, RO on errors */ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 7bdb0ed443e1..1e4d68590178 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -41,7 +41,7 @@ #include <linux/reiserfs_xattr.h> #include <linux/reiserfs_acl.h> #include <asm/uaccess.h> -#include <asm/checksum.h> +#include <net/checksum.h> #include <linux/smp_lock.h> #include <linux/stat.h> #include <asm/semaphore.h> diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index ddcd9e1ef282..c5af088d4a4c 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -550,12 +550,12 @@ romfs_read_inode(struct inode *i) } } -static kmem_cache_t * romfs_inode_cachep; +static struct kmem_cache * romfs_inode_cachep; static struct inode *romfs_alloc_inode(struct super_block *sb) { struct romfs_inode_info *ei; - ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL); + ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -566,7 +566,7 @@ static void romfs_destroy_inode(struct inode *inode) kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; diff --git a/fs/seq_file.c b/fs/seq_file.c index 555b9ac04c25..10690aa401c7 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -26,7 +26,7 @@ * ERR_PTR(error). In the end of sequence they return %NULL. ->show() * returns 0 in case of success and negative number in case of error. 
*/ -int seq_open(struct file *file, struct seq_operations *op) +int seq_open(struct file *file, const struct seq_operations *op) { struct seq_file *p = file->private_data; @@ -408,7 +408,7 @@ EXPORT_SYMBOL(single_open); int single_release(struct inode *inode, struct file *file) { - struct seq_operations *op = ((struct seq_file *)file->private_data)->op; + const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; int res = seq_release(inode, file); kfree(op); return res; diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 2c122ee83adb..4af4cd729a5a 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -50,12 +50,12 @@ static void smb_put_super(struct super_block *); static int smb_statfs(struct dentry *, struct kstatfs *); static int smb_show_options(struct seq_file *, struct vfsmount *); -static kmem_cache_t *smb_inode_cachep; +static struct kmem_cache *smb_inode_cachep; static struct inode *smb_alloc_inode(struct super_block *sb) { struct smb_inode_info *ei; - ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL); + ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; @@ -66,7 +66,7 @@ static void smb_destroy_inode(struct inode *inode) kmem_cache_free(smb_inode_cachep, SMB_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct smb_inode_info *ei = (struct smb_inode_info *) foo; unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR; diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c index 0fb74697abc4..a4bcae8a9aff 100644 --- a/fs/smbfs/request.c +++ b/fs/smbfs/request.c @@ -25,7 +25,7 @@ #define ROUND_UP(x) (((x)+3) & ~3) /* cache for request structures */ -static kmem_cache_t *req_cachep; +static struct kmem_cache *req_cachep; static int smb_request_send_req(struct smb_request *req); @@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server, struct smb_request *req; unsigned char *buf = NULL; - req = kmem_cache_alloc(req_cachep, SLAB_KERNEL); + req = kmem_cache_alloc(req_cachep, GFP_KERNEL); VERBOSE("allocating request: %p\n", req); if (!req) goto out; diff --git a/fs/stat.c b/fs/stat.c index bca07eb2003c..a0ebfc7f8a64 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -51,13 +51,6 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) return inode->i_op->getattr(mnt, dentry, stat); generic_fillattr(inode, stat); - if (!stat->blksize) { - struct super_block *s = inode->i_sb; - unsigned blocks; - blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits; - stat->blocks = (s->s_blocksize / 512) * blocks; - stat->blksize = s->s_blocksize; - } return 0; } diff --git a/fs/super.c b/fs/super.c index 47e554c12e76..84c320f6ad7e 100644 --- a/fs/super.c +++ b/fs/super.c @@ -221,6 +221,24 @@ static int grab_super(struct super_block *s) __releases(sb_lock) } /* + * Superblock locking. We really ought to get rid of these two. + */ +void lock_super(struct super_block * sb) +{ + get_fs_excl(); + mutex_lock(&sb->s_lock); +} + +void unlock_super(struct super_block * sb) +{ + put_fs_excl(); + mutex_unlock(&sb->s_lock); +} + +EXPORT_SYMBOL(lock_super); +EXPORT_SYMBOL(unlock_super); + +/* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. 
Requires a second blkdev diff --git a/fs/sync.c b/fs/sync.c index 1de747b5ddb9..865f32be386e 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -6,6 +6,7 @@ #include <linux/file.h> #include <linux/fs.h> #include <linux/module.h> +#include <linux/sched.h> #include <linux/writeback.h> #include <linux/syscalls.h> #include <linux/linkage.h> diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 3aa3434621ca..a5782e8c7f07 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -372,6 +372,51 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name) return error; } +int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent) +{ + struct dentry *old_parent_dentry, *new_parent_dentry, *new_dentry; + struct sysfs_dirent *new_parent_sd, *sd; + int error; + + if (!new_parent) + return -EINVAL; + + old_parent_dentry = kobj->parent ? + kobj->parent->dentry : sysfs_mount->mnt_sb->s_root; + new_parent_dentry = new_parent->dentry; + +again: + mutex_lock(&old_parent_dentry->d_inode->i_mutex); + if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) { + mutex_unlock(&old_parent_dentry->d_inode->i_mutex); + goto again; + } + + new_parent_sd = new_parent_dentry->d_fsdata; + sd = kobj->dentry->d_fsdata; + + new_dentry = lookup_one_len(kobj->name, new_parent_dentry, + strlen(kobj->name)); + if (IS_ERR(new_dentry)) { + error = PTR_ERR(new_dentry); + goto out; + } else + error = 0; + d_add(new_dentry, NULL); + d_move(kobj->dentry, new_dentry); + dput(new_dentry); + + /* Remove from old parent's list and insert into new parent's list. */ + list_del_init(&sd->s_sibling); + list_add(&sd->s_sibling, &new_parent_sd->s_children); + +out: + mutex_unlock(&new_parent_dentry->d_inode->i_mutex); + mutex_unlock(&old_parent_dentry->d_inode->i_mutex); + + return error; +} + static int sysfs_dir_open(struct inode *inode, struct file *file) { struct dentry * dentry = file->f_dentry; diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 298303b5a716..95c165101c98 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -190,6 +190,9 @@ fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t count = PAGE_SIZE - 1; error = copy_from_user(buffer->page,buf,count); buffer->needs_read_fill = 1; + /* if buf is assumed to contain a string, terminate it by \0, + so e.g. sscanf() can scan the string easily */ + buffer->page[count] = 0; return error ? -EFAULT : count; } diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 20551a1b8a56..e503f858fba8 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -16,7 +16,7 @@ struct vfsmount *sysfs_mount; struct super_block * sysfs_sb = NULL; -kmem_cache_t *sysfs_dir_cachep; +struct kmem_cache *sysfs_dir_cachep; static struct super_operations sysfs_ops = { .statfs = simple_statfs, diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 6f3d6bd52887..bd7cec295dab 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -1,6 +1,6 @@ extern struct vfsmount * sysfs_mount; -extern kmem_cache_t *sysfs_dir_cachep; +extern struct kmem_cache *sysfs_dir_cachep; extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES deleted file mode 100644 index 66ea6e92be66..000000000000 --- a/fs/sysv/CHANGES +++ /dev/null @@ -1,60 +0,0 @@ -Mon, 15 Dec 1997 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl> - * namei.c: struct sysv_dir_inode_operations updated to use dentries. - -Fri, 23 Jan 1998 Krzysztof G. 
Baranowski <kgb@manjak.knm.org.pl> - * inode.c: corrected 1 track offset setting (in sb->sv_block_base). - Originally it was overridden (by setting to zero) - in detected_[xenix,sysv4,sysv2,coherent]. Thanks - to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl> - for identifying the problem. - -Tue, 27 Jan 1998 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl> - * inode.c: added 2048-byte block support to SystemV FS. - Merged detected_bs[512,1024,2048]() into one function: - void detected_bs (u_char type, struct super_block *sb). - Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl> - for the patch. - -Wed, 4 Feb 1998 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl> - * namei.c: removed static subdir(); is_subdir() from dcache.c - is used instead. Cosmetic changes. - -Thu, 3 Dec 1998 Al Viro (viro@parcelfarce.linux.theplanet.co.uk) - * namei.c (sysv_rmdir): - Bugectomy: old check for victim being busy - (inode->i_count) wasn't replaced (with checking - dentry->d_count) and escaped Linus in the last round - of changes. Shot and buried. - -Wed, 9 Dec 1998 AV - * namei.c (do_sysv_rename): - Fixed incorrect check for other owners + race. - Removed checks that went to VFS. - * namei.c (sysv_unlink): - Removed checks that went to VFS. - -Thu, 10 Dec 1998 AV - * namei.c (do_mknod): - Removed dead code - mknod is never asked to - create a symlink or directory. Incidentially, - it wouldn't do it right if it would be called. - -Sat, 26 Dec 1998 KGB - * inode.c (detect_sysv4): - Added detection of expanded s_type field (0x10, - 0x20 and 0x30). Forced read-only access in this case. - -Sun, 21 Mar 1999 AV - * namei.c (sysv_link): - Fixed i_count usage that resulted in dcache corruption. - * inode.c: - Filled ->delete_inode() method with sysv_delete_inode(). - sysv_put_inode() is gone, as it tried to do ->delete_ - _inode()'s job. - * ialloc.c: (sysv_free_inode): - Fixed race. - -Sun, 30 Apr 1999 AV - * namei.c (sysv_mknod): - Removed dead code (S_IFREG case is now passed to - ->create() by VFS). diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog deleted file mode 100644 index f403f8b91b80..000000000000 --- a/fs/sysv/ChangeLog +++ /dev/null @@ -1,106 +0,0 @@ -Thu Feb 14 2002 Andrew Morton <akpm@zip.com.au> - - * dir_commit_chunk(): call writeout_one_page() as well as - waitfor_one_page() for IS_SYNC directories, so that we - actually do sync the directory. (forward-port from 2.4). - -Thu Feb 7 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - - * super.c: switched to ->get_sb() - * ChangeLog: fixed dates ;-) - -2002-01-24 David S. Miller <davem@redhat.com> - - * inode.c: Include linux/init.h - -Mon Jan 21 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - * ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out. - * i_vnode renamed to vfs_inode. Sorry, but let's keep that - consistent. - -Sat Jan 19 2002 Christoph Hellwig <hch@infradead.org> - - * include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using - list_entry() instead of inode->u. - * include/linux/sysv_fs_i.h: Add 'struct inode i_vnode' field to - sysv_inode_info structure. - * inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode - sop methods, add infrastructure for per-fs inode slab cache. - * super.c (init_sysv_fs): Initialize inode cache, recover properly - in the case of failed register_filesystem for V7. - (exit_sysv_fs): Destroy inode cache. - -Sat Jan 19 2002 Christoph Hellwig <hch@infradead.org> - - * include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I(). 
- * dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to - access fs-private inode data. - * ialloc.c (sysv_new_inode): Likewise. - * inode.c (sysv_read_inode): Likewise. - (sysv_update_inode): Likewise. - * itree.c (get_branch): Likewise. - (sysv_truncate): Likewise. - * symlink.c (sysv_readlink): Likewise. - (sysv_follow_link): Likewise. - -Fri Jan 4 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - - * ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname(). - * inode.c (sysv_read_inode): Likewise. - (sysv_update_inode): Likewise. - (sysv_sync_inode): Likewise. - * super.c (detect_sysv): Likewise. - (complete_read_super): Likewise. - (sysv_read_super): Likewise. - (v7_read_super): Likewise. - -Sun Dec 30 2001 Manfred Spraul <manfred@colorfullife.com> - - * dir.c (dir_commit_chunk): Do not set dir->i_version. - (sysv_readdir): Likewise. - -Thu Dec 27 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - - * itree.c (get_block): Use map_bh() to fill out bh_result. - -Tue Dec 25 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - - * super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize. - (v7_read_super): Likewise. - -Tue Nov 27 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk> - - * itree.c (get_block): Change type for iblock argument to sector_t. - * super.c (sysv_read_super): Set s_blocksize early. - (v7_read_super): Likewise. - * balloc.c (sysv_new_block): Use sb_bread(). instead of bread(). - (sysv_count_free_blocks): Likewise. - * ialloc.c (sysv_raw_inode): Likewise. - * itree.c (get_branch): Likewise. - (free_branches): Likewise. - * super.c (sysv_read_super): Likewise. - (v7_read_super): Likewise. - -Sat Dec 15 2001 Christoph Hellwig <hch@infradead.org> - - * inode.c (sysv_read_inode): Mark inode as bad in case of failure. - * super.c (complete_read_super): Check for bad root inode. - -Wed Nov 21 2001 Andrew Morton <andrewm@uow.edu.au> - - * file.c (sysv_sync_file): Call fsync_inode_data_buffers. - -Fri Oct 26 2001 Christoph Hellwig <hch@infradead.org> - - * dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h: - Implement per-Inode lookup offset cache. - Modelled after Ted's ext2 patch. - -Fri Oct 26 2001 Christoph Hellwig <hch@infradead.org> - - * inode.c, super.c, include/linux/sysv_fs.h, - include/linux/sysv_fs_sb.h: - Remove symlink faking. Noone really wants to use these as - linux filesystems and native OSes don't support it anyway. - - diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO deleted file mode 100644 index de4e4d17cac6..000000000000 --- a/fs/sysv/INTRO +++ /dev/null @@ -1,182 +0,0 @@ -This is the implementation of the SystemV/Coherent filesystem for Linux. -It grew out of separate filesystem implementations - - Xenix FS Doug Evans <dje@cygnus.com> June 1992 - SystemV FS Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993 - Coherent FS B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993 - -and was merged together in July 1993. - -These filesystems are rather similar. Here is a comparison with Minix FS: - -* Linux fdisk reports on partitions - - Minix FS 0x81 Linux/Minix - - Xenix FS ?? - - SystemV FS ?? - - Coherent FS 0x08 AIX bootable - -* Size of a block or zone (data allocation unit on disk) - - Minix FS 1024 - - Xenix FS 1024 (also 512 ??) - - SystemV FS 1024 (also 512 and 2048) - - Coherent FS 512 - -* General layout: all have one boot block, one super block and - separate areas for inodes and for directories/data. - On SystemV Release 2 FS (e.g. 
Microport) the first track is reserved and - all the block numbers (including the super block) are offset by one track. - -* Byte ordering of "short" (16 bit entities) on disk: - - Minix FS little endian 0 1 - - Xenix FS little endian 0 1 - - SystemV FS little endian 0 1 - - Coherent FS little endian 0 1 - Of course, this affects only the file system, not the data of files on it! - -* Byte ordering of "long" (32 bit entities) on disk: - - Minix FS little endian 0 1 2 3 - - Xenix FS little endian 0 1 2 3 - - SystemV FS little endian 0 1 2 3 - - Coherent FS PDP-11 2 3 0 1 - Of course, this affects only the file system, not the data of files on it! - -* Inode on disk: "short", 0 means non-existent, the root dir ino is: - - Minix FS 1 - - Xenix FS, SystemV FS, Coherent FS 2 - -* Maximum number of hard links to a file: - - Minix FS 250 - - Xenix FS ?? - - SystemV FS ?? - - Coherent FS >=10000 - -* Free inode management: - - Minix FS a bitmap - - Xenix FS, SystemV FS, Coherent FS - There is a cache of a certain number of free inodes in the super-block. - When it is exhausted, new free inodes are found using a linear search. - -* Free block management: - - Minix FS a bitmap - - Xenix FS, SystemV FS, Coherent FS - Free blocks are organized in a "free list". Maybe a misleading term, - since it is not true that every free block contains a pointer to - the next free block. Rather, the free blocks are organized in chunks - of limited size, and every now and then a free block contains pointers - to the free blocks pertaining to the next chunk; the first of these - contains pointers and so on. The list terminates with a "block number" - 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS. - -* Super-block location: - - Minix FS block 1 = bytes 1024..2047 - - Xenix FS block 1 = bytes 1024..2047 - - SystemV FS bytes 512..1023 - - Coherent FS block 1 = bytes 512..1023 - -* Super-block layout: - - Minix FS - unsigned short s_ninodes; - unsigned short s_nzones; - unsigned short s_imap_blocks; - unsigned short s_zmap_blocks; - unsigned short s_firstdatazone; - unsigned short s_log_zone_size; - unsigned long s_max_size; - unsigned short s_magic; - - Xenix FS, SystemV FS, Coherent FS - unsigned short s_firstdatazone; - unsigned long s_nzones; - unsigned short s_fzone_count; - unsigned long s_fzones[NICFREE]; - unsigned short s_finode_count; - unsigned short s_finodes[NICINOD]; - char s_flock; - char s_ilock; - char s_modified; - char s_rdonly; - unsigned long s_time; - short s_dinfo[4]; -- SystemV FS only - unsigned long s_free_zones; - unsigned short s_free_inodes; - short s_dinfo[4]; -- Xenix FS only - unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only - char s_fname[6]; - char s_fpack[6]; - then they differ considerably: - Xenix FS - char s_clean; - char s_fill[371]; - long s_magic; - long s_type; - SystemV FS - long s_fill[12 or 14]; - long s_state; - long s_magic; - long s_type; - Coherent FS - unsigned long s_unique; - Note that Coherent FS has no magic. 
- -* Inode layout: - - Minix FS - unsigned short i_mode; - unsigned short i_uid; - unsigned long i_size; - unsigned long i_time; - unsigned char i_gid; - unsigned char i_nlinks; - unsigned short i_zone[7+1+1]; - - Xenix FS, SystemV FS, Coherent FS - unsigned short i_mode; - unsigned short i_nlink; - unsigned short i_uid; - unsigned short i_gid; - unsigned long i_size; - unsigned char i_zone[3*(10+1+1+1)]; - unsigned long i_atime; - unsigned long i_mtime; - unsigned long i_ctime; - -* Regular file data blocks are organized as - - Minix FS - 7 direct blocks - 1 indirect block (pointers to blocks) - 1 double-indirect block (pointer to pointers to blocks) - - Xenix FS, SystemV FS, Coherent FS - 10 direct blocks - 1 indirect block (pointers to blocks) - 1 double-indirect block (pointer to pointers to blocks) - 1 triple-indirect block (pointer to pointers to pointers to blocks) - -* Inode size, inodes per block - - Minix FS 32 32 - - Xenix FS 64 16 - - SystemV FS 64 16 - - Coherent FS 64 8 - -* Directory entry on disk - - Minix FS - unsigned short inode; - char name[14/30]; - - Xenix FS, SystemV FS, Coherent FS - unsigned short inode; - char name[14]; - -* Dir entry size, dir entries per block - - Minix FS 16/32 64/32 - - Xenix FS 16 64 - - SystemV FS 16 64 - - Coherent FS 16 32 - -* How to implement symbolic links such that the host fsck doesn't scream: - - Minix FS normal - - Xenix FS kludge: as regular files with chmod 1000 - - SystemV FS ?? - - Coherent FS kludge: as regular files with chmod 1000 - - -Notation: We often speak of a "block" but mean a zone (the allocation unit) -and not the disk driver's notion of "block". - - -Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index d63c5e48b050..ead9864567e3 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -301,13 +301,13 @@ static void sysv_delete_inode(struct inode *inode) unlock_kernel(); } -static kmem_cache_t *sysv_inode_cachep; +static struct kmem_cache *sysv_inode_cachep; static struct inode *sysv_alloc_inode(struct super_block *sb) { struct sysv_inode_info *si; - si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL); + si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL); if (!si) return NULL; return &si->vfs_inode; @@ -318,7 +318,7 @@ static void sysv_destroy_inode(struct inode *inode) kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); } -static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags) +static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags) { struct sysv_inode_info *si = (struct sysv_inode_info *)p; diff --git a/fs/udf/super.c b/fs/udf/super.c index 1aea6a4f9a4a..1dbc2955f02e 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -107,12 +107,12 @@ static struct file_system_type udf_fstype = { .fs_flags = FS_REQUIRES_DEV, }; -static kmem_cache_t * udf_inode_cachep; +static struct kmem_cache * udf_inode_cachep; static struct inode *udf_alloc_inode(struct super_block *sb) { struct udf_inode_info *ei; - ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); + ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); if (!ei) return NULL; @@ -130,7 +130,7 @@ static void udf_destroy_inode(struct inode *inode) kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct udf_inode_info *ei = (struct udf_inode_info *) foo; @@ -1709,7 
+1709,7 @@ void udf_error(struct super_block *sb, const char *function, sb->s_dirt = 1; } va_start(args, fmt); - vsprintf(error_buf, fmt, args); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -1721,7 +1721,7 @@ void udf_warning(struct super_block *sb, const char *function, va_list args; va_start (args, fmt); - vsprintf(error_buf, fmt, args); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); va_end(args); printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n", sb->s_id, function, error_buf); diff --git a/fs/ufs/super.c b/fs/ufs/super.c index ec79e3091d1b..8a8e9382ec09 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -224,7 +224,7 @@ void ufs_error (struct super_block * sb, const char * function, sb->s_flags |= MS_RDONLY; } va_start (args, fmt); - vsprintf (error_buf, fmt, args); + vsnprintf (error_buf, sizeof(error_buf), fmt, args); va_end (args); switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { case UFS_MOUNT_ONERROR_PANIC: @@ -255,7 +255,7 @@ void ufs_panic (struct super_block * sb, const char * function, sb->s_dirt = 1; } va_start (args, fmt); - vsprintf (error_buf, fmt, args); + vsnprintf (error_buf, sizeof(error_buf), fmt, args); va_end (args); sb->s_flags |= MS_RDONLY; printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n", @@ -268,7 +268,7 @@ void ufs_warning (struct super_block * sb, const char * function, va_list args; va_start (args, fmt); - vsprintf (error_buf, fmt, args); + vsnprintf (error_buf, sizeof(error_buf), fmt, args); va_end (args); printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n", sb->s_id, function, error_buf); @@ -1204,12 +1204,12 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static kmem_cache_t * ufs_inode_cachep; +static struct kmem_cache * ufs_inode_cachep; static struct inode *ufs_alloc_inode(struct super_block *sb) { struct ufs_inode_info *ei; - ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL); + ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->vfs_inode.i_version = 1; @@ -1221,7 +1221,7 @@ static void ufs_destroy_inode(struct inode *inode) kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); } -static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 28fce6c239b5..7dd12bb1d62b 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -299,7 +299,7 @@ static inline void *get_usb_offset(struct ufs_sb_private_info *uspi, #define ubh_get_addr16(ubh,begin) \ (((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \ - ((begin) & (uspi->fsize>>1) - 1))) + ((begin) & ((uspi->fsize>>1) - 1))) #define ubh_get_addr32(ubh,begin) \ (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \ diff --git a/fs/utimes.c b/fs/utimes.c index 1bcd852fc4a9..99cf2cb11fec 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -2,6 +2,7 @@ #include <linux/fs.h> #include <linux/linkage.h> #include <linux/namei.h> +#include <linux/sched.h> #include <linux/utime.h> #include <asm/uaccess.h> #include <asm/unistd.h> diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c index edb711ff7b05..0afd745a37cd 100644 --- a/fs/vfat/namei.c +++ b/fs/vfat/namei.c @@ -1004,6 +1004,7 @@ static struct inode_operations vfat_dir_inode_operations = { 
.rmdir = vfat_rmdir, .rename = vfat_rename, .setattr = fat_notify_change, + .getattr = fat_getattr, }; static int vfat_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6 index 291948d5085a..b49989bb89ad 100644 --- a/fs/xfs/Makefile-linux-2.6 +++ b/fs/xfs/Makefile-linux-2.6 @@ -21,22 +21,7 @@ EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char XFS_LINUX := linux-2.6 ifeq ($(CONFIG_XFS_DEBUG),y) - EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG - EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING -endif -ifeq ($(CONFIG_XFS_TRACE),y) - EXTRA_CFLAGS += -DXFS_ALLOC_TRACE - EXTRA_CFLAGS += -DXFS_ATTR_TRACE - EXTRA_CFLAGS += -DXFS_BLI_TRACE - EXTRA_CFLAGS += -DXFS_BMAP_TRACE - EXTRA_CFLAGS += -DXFS_BMBT_TRACE - EXTRA_CFLAGS += -DXFS_DIR2_TRACE - EXTRA_CFLAGS += -DXFS_DQUOT_TRACE - EXTRA_CFLAGS += -DXFS_ILOCK_TRACE - EXTRA_CFLAGS += -DXFS_LOG_TRACE - EXTRA_CFLAGS += -DXFS_RW_TRACE - EXTRA_CFLAGS += -DXFS_BUF_TRACE - EXTRA_CFLAGS += -DXFS_VNODE_TRACE + EXTRA_CFLAGS += -g endif obj-$(CONFIG_XFS_FS) += xfs.o diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 09360cf1e1f2..8e6b56fc1cad 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -149,9 +149,10 @@ xfs_destroy_ioend( */ STATIC void xfs_end_bio_delalloc( - void *data) + struct work_struct *work) { - xfs_ioend_t *ioend = data; + xfs_ioend_t *ioend = + container_of(work, xfs_ioend_t, io_work); xfs_destroy_ioend(ioend); } @@ -161,9 +162,10 @@ xfs_end_bio_delalloc( */ STATIC void xfs_end_bio_written( - void *data) + struct work_struct *work) { - xfs_ioend_t *ioend = data; + xfs_ioend_t *ioend = + container_of(work, xfs_ioend_t, io_work); xfs_destroy_ioend(ioend); } @@ -176,9 +178,10 @@ xfs_end_bio_written( */ STATIC void xfs_end_bio_unwritten( - void *data) + struct work_struct *work) { - xfs_ioend_t *ioend = data; + xfs_ioend_t *ioend = + container_of(work, xfs_ioend_t, io_work); bhv_vnode_t *vp = ioend->io_vnode; xfs_off_t offset = ioend->io_offset; size_t size = ioend->io_size; @@ -220,11 +223,11 @@ xfs_alloc_ioend( ioend->io_size = 0; if (type == IOMAP_UNWRITTEN) - INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); + INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); else if (type == IOMAP_DELAY) - INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); + INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); else - INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); + INIT_WORK(&ioend->io_work, xfs_end_bio_written); return ioend; } diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index db5f5a3608ca..4fb01ffdfd1a 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -15,6 +15,7 @@ * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "xfs.h" #include <linux/stddef.h> #include <linux/errno.h> #include <linux/slab.h> @@ -31,7 +32,7 @@ #include <linux/kthread.h> #include <linux/migrate.h> #include <linux/backing-dev.h> -#include "xfs_linux.h" +#include <linux/freezer.h> STATIC kmem_zone_t *xfs_buf_zone; STATIC kmem_shaker_t xfs_buf_shake; @@ -994,9 +995,10 @@ xfs_buf_wait_unpin( STATIC void xfs_buf_iodone_work( - void *v) + struct work_struct *work) { - xfs_buf_t *bp = (xfs_buf_t *)v; + xfs_buf_t *bp = + container_of(work, xfs_buf_t, b_iodone_work); if (bp->b_iodone) (*(bp->b_iodone))(bp); @@ -1017,10 +1019,10 @@ xfs_buf_ioend( if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { if (schedule) { - 
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); + INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); queue_work(xfslogd_workqueue, &bp->b_iodone_work); } else { - xfs_buf_iodone_work(bp); + xfs_buf_iodone_work(&bp->b_iodone_work); } } else { up(&bp->b_iodonesema); @@ -1406,7 +1408,7 @@ xfs_alloc_bufhash( btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */ btp->bt_hashmask = (1 << btp->bt_hashshift) - 1; btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) * - sizeof(xfs_bufhash_t), KM_SLEEP); + sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE); for (i = 0; i < (1 << btp->bt_hashshift); i++) { spin_lock_init(&btp->bt_hash[i].bh_lock); INIT_LIST_HEAD(&btp->bt_hash[i].bh_list); @@ -1825,11 +1827,11 @@ xfs_buf_init(void) if (!xfs_buf_zone) goto out_free_trace_buf; - xfslogd_workqueue = create_workqueue("xfslogd"); + xfslogd_workqueue = create_freezeable_workqueue("xfslogd"); if (!xfslogd_workqueue) goto out_free_buf_zone; - xfsdatad_workqueue = create_workqueue("xfsdatad"); + xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad"); if (!xfsdatad_workqueue) goto out_destroy_xfslogd_workqueue; diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h new file mode 100644 index 000000000000..a8b0b1685eed --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_dmapi_priv.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_DMAPI_PRIV_H__ +#define __XFS_DMAPI_PRIV_H__ + +/* + * Based on IO_ISDIRECT, decide which i_ flag is set. + */ +#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? 
\ + DM_FLAGS_IMUX : 0) +#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) + +#endif /*__XFS_DMAPI_PRIV_H__*/ diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index a74f854d91e6..74d094829a4d 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -341,8 +341,11 @@ xfs_open_by_handle( put_unused_fd(new_fd); return -XFS_ERROR(-PTR_ERR(filp)); } - if (inode->i_mode & S_IFREG) + if (inode->i_mode & S_IFREG) { + /* invisible operation should not change atime */ + filp->f_flags |= O_NOATIME; filp->f_op = &xfs_invis_file_operations; + } fd_install(new_fd, filp); return new_fd; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 38c4d128a8c0..b93265b7c79c 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -56,6 +56,7 @@ #include <linux/mempool.h> #include <linux/writeback.h> #include <linux/kthread.h> +#include <linux/freezer.h> STATIC struct quotactl_ops xfs_quotactl_operations; STATIC struct super_operations xfs_super_operations; @@ -227,9 +228,7 @@ xfs_initialize_vnode( xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); xfs_set_inodeops(inode); - spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~XFS_INEW; - spin_unlock(&ip->i_flags_lock); + xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(inode); diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index c75f68361e33..4363512d2f90 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -15,11 +15,9 @@ * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <xfs.h> #include "debug.h" #include "spin.h" -#include <asm/page.h> -#include <linux/sched.h> -#include <linux/kernel.h> static char message[256]; /* keep it off the stack */ static DEFINE_SPINLOCK(xfs_err_lock); diff --git a/fs/xfs/support/move.c b/fs/xfs/support/move.c index caefa17b80fe..ac8617ca3909 100644 --- a/fs/xfs/support/move.c +++ b/fs/xfs/support/move.c @@ -22,7 +22,7 @@ * as we go. 
*/ int -uio_read(caddr_t src, size_t len, struct uio *uio) +xfs_uio_read(caddr_t src, size_t len, struct uio *uio) { size_t count; diff --git a/fs/xfs/support/move.h b/fs/xfs/support/move.h index 97a2498d2da3..977879c24ff5 100644 --- a/fs/xfs/support/move.h +++ b/fs/xfs/support/move.h @@ -65,6 +65,6 @@ struct uio { typedef struct uio uio_t; typedef struct iovec iovec_t; -extern int uio_read (caddr_t, size_t, uio_t *); +extern int xfs_uio_read (caddr_t, size_t, uio_t *); #endif /* __XFS_SUPPORT_MOVE_H__ */ diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 1a48dbb902a7..bf0a12040b13 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -17,5 +17,28 @@ */ #ifndef __XFS_H__ #define __XFS_H__ + +#ifdef CONFIG_XFS_DEBUG +#define STATIC +#define DEBUG 1 +#define XFS_BUF_LOCK_TRACKING 1 +/* #define QUOTADEBUG 1 */ +#endif + +#ifdef CONFIG_XFS_TRACE +#define XFS_ALLOC_TRACE 1 +#define XFS_ATTR_TRACE 1 +#define XFS_BLI_TRACE 1 +#define XFS_BMAP_TRACE 1 +#define XFS_BMBT_TRACE 1 +#define XFS_DIR2_TRACE 1 +#define XFS_DQUOT_TRACE 1 +#define XFS_ILOCK_TRACE 1 +#define XFS_LOG_TRACE 1 +#define XFS_RW_TRACE 1 +#define XFS_BUF_TRACE 1 +#define XFS_VNODE_TRACE 1 +#endif + #include <linux-2.6/xfs_linux.h> #endif /* __XFS_H__ */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 5b050c06795f..498ad50d1f45 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1171,6 +1171,8 @@ xfs_bmap_add_extent_delay_real( xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); xfs_bmbt_set_blockcount(ep, temp); r[0] = *new; + r[1].br_state = PREV.br_state; + r[1].br_startblock = 0; r[1].br_startoff = new_endoff; temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; r[1].br_blockcount = temp2; diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 8edbe1adb95b..8e8e5279334a 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -678,7 +678,7 @@ xfs_dir2_put_dirent64_uio( idbp->d_off = pa->cook; idbp->d_name[namelen] = '\0'; memcpy(idbp->d_name, pa->name, namelen); - rval = uio_read((caddr_t)idbp, reclen, uio); + rval = xfs_uio_read((caddr_t)idbp, reclen, uio); pa->done = (rval == 0); return rval; } diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h index 4e7865ad6f0e..adc3d251240d 100644 --- a/fs/xfs/xfs_dmapi.h +++ b/fs/xfs/xfs_dmapi.h @@ -157,27 +157,9 @@ typedef enum { #define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */ /* - * Based on IO_ISDIRECT, decide which i_ flag is set. + * Pull in platform specific event flags defines */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) -#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ - DM_FLAGS_IMUX : 0) -#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) -#endif - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \ - (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22)) -#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ - DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_IMUX) -#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) -#endif - -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21) -#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ - 0 : DM_FLAGS_IMUX) -#define DM_SEM_FLAG_WR (DM_FLAGS_IMUX) -#endif - +#include "xfs_dmapi_priv.h" /* * Macros to turn caller specified delay/block flags into diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index b73d216ecaf9..c1c89dac19cc 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -215,7 +215,7 @@ again: * If INEW is set this inode is being set up * we need to pause and try again. 
*/ - if (ip->i_flags & XFS_INEW) { + if (xfs_iflags_test(ip, XFS_INEW)) { read_unlock(&ih->ih_lock); delay(1); XFS_STATS_INC(xs_ig_frecycle); @@ -230,22 +230,50 @@ again: * on its way out of the system, * we need to pause and try again. */ - if (ip->i_flags & XFS_IRECLAIM) { + if (xfs_iflags_test(ip, XFS_IRECLAIM)) { read_unlock(&ih->ih_lock); delay(1); XFS_STATS_INC(xs_ig_frecycle); goto again; } + ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); + + /* + * If lookup is racing with unlink, then we + * should return an error immediately so we + * don't remove it from the reclaim list and + * potentially leak the inode. + */ + if ((ip->i_d.di_mode == 0) && + !(flags & XFS_IGET_CREATE)) { + read_unlock(&ih->ih_lock); + return ENOENT; + } + + /* + * There may be transactions sitting in the + * incore log buffers or being flushed to disk + * at this time. We can't clear the + * XFS_IRECLAIMABLE flag until these + * transactions have hit the disk, otherwise we + * will void the guarantee the flag provides + * xfs_iunpin() + */ + if (xfs_ipincount(ip)) { + read_unlock(&ih->ih_lock); + xfs_log_force(mp, 0, + XFS_LOG_FORCE|XFS_LOG_SYNC); + XFS_STATS_INC(xs_ig_frecycle); + goto again; + } vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address); XFS_STATS_INC(xs_ig_found); - spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~XFS_IRECLAIMABLE; - spin_unlock(&ip->i_flags_lock); + xfs_iflags_clear(ip, XFS_IRECLAIMABLE); version = ih->ih_version; read_unlock(&ih->ih_lock); xfs_ihash_promote(ih, ip, version); @@ -299,10 +327,7 @@ finish_inode: if (lock_flags != 0) xfs_ilock(ip, lock_flags); - spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~XFS_ISTALE; - spin_unlock(&ip->i_flags_lock); - + xfs_iflags_clear(ip, XFS_ISTALE); vn_trace_exit(vp, "xfs_iget.found", (inst_t *)__return_address); goto return_ip; @@ -371,10 +396,7 @@ finish_inode: ih->ih_next = ip; ip->i_udquot = ip->i_gdquot = NULL; ih->ih_version++; - spin_lock(&ip->i_flags_lock); - ip->i_flags |= XFS_INEW; - spin_unlock(&ip->i_flags_lock); - + xfs_iflags_set(ip, XFS_INEW); write_unlock(&ih->ih_lock); /* @@ -625,7 +647,7 @@ xfs_iput_new(xfs_inode_t *ip, vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); if ((ip->i_d.di_mode == 0)) { - ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE)); + ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); vn_mark_bad(vp); } if (inode->i_state & I_NEW) @@ -683,6 +705,7 @@ xfs_ireclaim(xfs_inode_t *ip) /* * Free all memory associated with the inode. 
*/ + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); xfs_idestroy(ip); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c27d7d495aa0..44dfac521285 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2193,7 +2193,7 @@ xfs_ifree_cluster( /* Inode not in memory or we found it already, * nothing to do */ - if (!ip || (ip->i_flags & XFS_ISTALE)) { + if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { read_unlock(&ih->ih_lock); continue; } @@ -2215,10 +2215,7 @@ xfs_ifree_cluster( if (ip == free_ip) { if (xfs_iflock_nowait(ip)) { - spin_lock(&ip->i_flags_lock); - ip->i_flags |= XFS_ISTALE; - spin_unlock(&ip->i_flags_lock); - + xfs_iflags_set(ip, XFS_ISTALE); if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); } else { @@ -2231,9 +2228,7 @@ xfs_ifree_cluster( if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { if (xfs_iflock_nowait(ip)) { - spin_lock(&ip->i_flags_lock); - ip->i_flags |= XFS_ISTALE; - spin_unlock(&ip->i_flags_lock); + xfs_iflags_set(ip, XFS_ISTALE); if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); @@ -2263,9 +2258,7 @@ xfs_ifree_cluster( AIL_LOCK(mp,s); iip->ili_flush_lsn = iip->ili_item.li_lsn; AIL_UNLOCK(mp, s); - spin_lock(&iip->ili_inode->i_flags_lock); - iip->ili_inode->i_flags |= XFS_ISTALE; - spin_unlock(&iip->ili_inode->i_flags_lock); + xfs_iflags_set(iip->ili_inode, XFS_ISTALE); pre_flushed++; } lip = lip->li_bio_list; @@ -2748,42 +2741,39 @@ xfs_iunpin( { ASSERT(atomic_read(&ip->i_pincount) > 0); - if (atomic_dec_and_test(&ip->i_pincount)) { + if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) { + /* - * If the inode is currently being reclaimed, the - * linux inode _and_ the xfs vnode may have been - * freed so we cannot reference either of them safely. - * Hence we should not try to do anything to them - * if the xfs inode is currently in the reclaim - * path. + * If the inode is currently being reclaimed, the link between + * the bhv_vnode and the xfs_inode will be broken after the + * XFS_IRECLAIM* flag is set. Hence, if these flags are not + * set, then we can move forward and mark the linux inode dirty + * knowing that it is still valid as it won't freed until after + * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The + * i_flags_lock is used to synchronise the setting of the + * XFS_IRECLAIM* flags and the breaking of the link, and so we + * can execute atomically w.r.t to reclaim by holding this lock + * here. * - * However, we still need to issue the unpin wakeup - * call as the inode reclaim may be blocked waiting for - * the inode to become unpinned. + * However, we still need to issue the unpin wakeup call as the + * inode reclaim may be blocked waiting for the inode to become + * unpinned. 
*/ - struct inode *inode = NULL; - spin_lock(&ip->i_flags_lock); - if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) { + if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) { bhv_vnode_t *vp = XFS_ITOV_NULL(ip); + struct inode *inode = NULL; + + BUG_ON(vp == NULL); + inode = vn_to_inode(vp); + BUG_ON(inode->i_state & I_CLEAR); /* make sync come back and flush this inode */ - if (vp) { - inode = vn_to_inode(vp); - - if (!(inode->i_state & - (I_NEW|I_FREEING|I_CLEAR))) { - inode = igrab(inode); - if (inode) - mark_inode_dirty_sync(inode); - } else - inode = NULL; - } + if (!(inode->i_state & (I_NEW|I_FREEING))) + mark_inode_dirty_sync(inode); } spin_unlock(&ip->i_flags_lock); wake_up(&ip->i_ipin_wait); - if (inode) - iput(inode); } } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index e96eb0835fe6..bc823720d88f 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -305,6 +305,47 @@ typedef struct xfs_inode { #endif } xfs_inode_t; + +/* + * i_flags helper functions + */ +static inline void +__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) +{ + ip->i_flags |= flags; +} + +static inline void +xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) +{ + spin_lock(&ip->i_flags_lock); + __xfs_iflags_set(ip, flags); + spin_unlock(&ip->i_flags_lock); +} + +static inline void +xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags) +{ + spin_lock(&ip->i_flags_lock); + ip->i_flags &= ~flags; + spin_unlock(&ip->i_flags_lock); +} + +static inline int +__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) +{ + return (ip->i_flags & flags); +} + +static inline int +xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) +{ + int ret; + spin_lock(&ip->i_flags_lock); + ret = __xfs_iflags_test(ip, flags); + spin_unlock(&ip->i_flags_lock); + return ret; +} #endif /* __KERNEL__ */ diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 061e2ffdd1de..bda774a04b8f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -1013,7 +1013,7 @@ xfs_readlink( pathlen = (int)ip->i_d.di_size; if (ip->i_df.if_flags & XFS_IFINLINE) { - error = uio_read(ip->i_df.if_u1.if_data, pathlen, uiop); + error = xfs_uio_read(ip->i_df.if_u1.if_data, pathlen, uiop); } else { /* @@ -1044,7 +1044,7 @@ xfs_readlink( byte_cnt = pathlen; pathlen -= byte_cnt; - error = uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop); + error = xfs_uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop); xfs_buf_relse (bp); } @@ -3827,11 +3827,16 @@ xfs_reclaim( */ xfs_synchronize_atime(ip); - /* If we have nothing to flush with this inode then complete the - * teardown now, otherwise break the link between the xfs inode - * and the linux inode and clean up the xfs inode later. This - * avoids flushing the inode to disk during the delete operation - * itself. + /* + * If we have nothing to flush with this inode then complete the + * teardown now, otherwise break the link between the xfs inode and the + * linux inode and clean up the xfs inode later. This avoids flushing + * the inode to disk during the delete operation itself. + * + * When breaking the link, we need to set the XFS_IRECLAIMABLE flag + * first to ensure that xfs_iunpin() will never see an xfs inode + * that has a linux inode being reclaimed. Synchronisation is provided + * by the i_flags_lock. 
*/ if (!ip->i_update_core && (ip->i_itemp == NULL)) { xfs_ilock(ip, XFS_ILOCK_EXCL); @@ -3840,13 +3845,13 @@ xfs_reclaim( } else { xfs_mount_t *mp = ip->i_mount; - /* Protect sync from us */ + /* Protect sync and unpin from us */ XFS_MOUNT_ILOCK(mp); - vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); - list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); spin_lock(&ip->i_flags_lock); - ip->i_flags |= XFS_IRECLAIMABLE; + __xfs_iflags_set(ip, XFS_IRECLAIMABLE); + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); spin_unlock(&ip->i_flags_lock); + list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); XFS_MOUNT_IUNLOCK(mp); } return 0; @@ -3872,8 +3877,8 @@ xfs_finish_reclaim( */ write_lock(&ih->ih_lock); spin_lock(&ip->i_flags_lock); - if ((ip->i_flags & XFS_IRECLAIM) || - (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) { + if (__xfs_iflags_test(ip, XFS_IRECLAIM) || + (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) && vp == NULL)) { spin_unlock(&ip->i_flags_lock); write_unlock(&ih->ih_lock); if (locked) { @@ -3882,7 +3887,7 @@ xfs_finish_reclaim( } return 1; } - ip->i_flags |= XFS_IRECLAIM; + __xfs_iflags_set(ip, XFS_IRECLAIM); spin_unlock(&ip->i_flags_lock); write_unlock(&ih->ih_lock); |
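
The hunks above replace the open-coded spin_lock(&ip->i_flags_lock) / flag-update / spin_unlock sequences with the xfs_iflags_set(), xfs_iflags_clear() and xfs_iflags_test() helpers added to fs/xfs/xfs_inode.h, together with lock-free __xfs_iflags_*() variants for callers that already hold i_flags_lock. A minimal usage sketch follows, assuming ip is a valid xfs_inode_t obtained elsewhere; it is an illustration of the calling pattern, not part of the patch:

	/* The plain helpers take and release ip->i_flags_lock internally. */
	xfs_iflags_set(ip, XFS_ISTALE);
	xfs_iflags_clear(ip, XFS_INEW);
	if (xfs_iflags_test(ip, XFS_IRECLAIM))
		return;		/* hypothetical caller backs off during reclaim */

	/*
	 * The __xfs_iflags_*() variants assume the caller already holds
	 * i_flags_lock, as xfs_reclaim() does when it must set
	 * XFS_IRECLAIMABLE and detach the behaviour from the vnode inside
	 * one critical section.
	 */
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	/* ... work that must appear atomic with respect to reclaim ... */
	spin_unlock(&ip->i_flags_lock);

Keeping both the locking and the lock-free variants is what allows xfs_reclaim() and xfs_iunpin() to synchronise purely through i_flags_lock, as described by the comments in the xfs_inode.c and xfs_vnodeops.c hunks above.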