author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-25 06:27:31 -0400
---|---|---
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-25 06:27:31 -0400
commit | ccf01ef7aa9c6c293a1c64c27331a2ce227916ec |
tree | 421fa29aedff988e392f92780637553e275d37a0 | /fs/nfs
parent | 82b145c5a572f7fa7211dffe2097234dc91bcecc |
Merge branch 'odirect'
Diffstat (limited to 'fs/nfs')
-rw-r--r-- | fs/nfs/direct.c | 435
1 file changed, 234 insertions, 201 deletions
```diff
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e25b7595b7ad..402005c35ab3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -68,19 +68,25 @@ struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
 
 	/* I/O parameters */
+	struct list_head	list,		/* nfs_read/write_data structs */
+				rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_open_context	*ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
+	unsigned long		user_addr;	/* location of user's buffer */
+	size_t			user_count;	/* total bytes to move */
+	loff_t			pos;		/* starting offset in file */
+	struct page **		pages;		/* pages in our buffer */
+	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
-	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
+	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
-	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
@@ -88,37 +94,8 @@ struct nfs_direct_req {
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
+static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
-static const struct rpc_call_ops nfs_write_direct_ops;
-
-static inline void get_dreq(struct nfs_direct_req *dreq)
-{
-	atomic_inc(&dreq->io_count);
-}
-
-static inline int put_dreq(struct nfs_direct_req *dreq)
-{
-	return atomic_dec_and_test(&dreq->io_count);
-}
-
-/*
- * "size" is never larger than rsize or wsize.
- */
-static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
-{
-	int page_count;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-	BUG_ON(page_count < 0);
-
-	return page_count;
-}
-
-static inline unsigned int nfs_max_pages(unsigned int size)
-{
-	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
@@ -142,21 +119,50 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_direct_dirty_pages(struct page **pages, int npages)
+static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
 {
 	int i;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
-		if (!PageCompound(page))
+		if (do_dirty && !PageCompound(page))
 			set_page_dirty_lock(page);
+		page_cache_release(page);
 	}
+	kfree(pages);
 }
 
-static void nfs_direct_release_pages(struct page **pages, int npages)
+static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
 {
-	int i;
-	for (i = 0; i < npages; i++)
-		page_cache_release(pages[i]);
+	int result = -ENOMEM;
+	unsigned long page_count;
+	size_t array_size;
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+
+	array_size = (page_count * sizeof(struct page *));
+	*pages = kmalloc(array_size, GFP_KERNEL);
+	if (*pages) {
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					page_count, (rw == READ), 0,
+					*pages, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (result != page_count) {
+			/*
+			 * If we got fewer pages than expected from
+			 * get_user_pages(), the user buffer runs off the
+			 * end of a mapping; return EFAULT.
+			 */
+			if (result >= 0) {
+				nfs_free_user_pages(*pages, result, 0);
+				result = -EFAULT;
+			} else
+				kfree(*pages);
+			*pages = NULL;
+		}
+	}
+	return result;
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -168,13 +174,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 		return NULL;
 
 	kref_init(&dreq->kref);
-	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
+	INIT_LIST_HEAD(&dreq->list);
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	atomic_set(&dreq->io_count, 0);
+	dreq->outstanding = 0;
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -215,11 +221,18 @@ out:
 }
 
 /*
- * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
- * the iocb is still valid here if this is a synchronous request.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ *
+ * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
+	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
+
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -232,10 +245,48 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ * Note we also set the number of requests we have in the dreq when we are
+ * done.  This prevents races with I/O completion so we will always wait
+ * until all requests have been dispatched and completed.
 */
+static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
+{
+	struct list_head *list;
+	struct nfs_direct_req *dreq;
+	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	dreq = nfs_direct_req_alloc();
+	if (!dreq)
+		return NULL;
+
+	list = &dreq->list;
+	for(;;) {
+		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
+
+		if (unlikely(!data)) {
+			while (!list_empty(list)) {
+				data = list_entry(list->next,
+						  struct nfs_read_data, pages);
+				list_del(&data->pages);
+				nfs_readdata_free(data);
+			}
+			kref_put(&dreq->kref, nfs_direct_req_release);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&data->pages);
+		list_add(&data->pages, list);
+
+		data->req = (struct nfs_page *) dreq;
+		dreq->outstanding++;
+		if (nbytes <= rsize)
+			break;
+		nbytes -= rsize;
+	}
+	kref_get(&dreq->kref);
+	return dreq;
+}
+
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -244,9 +295,6 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
-	nfs_direct_dirty_pages(data->pagevec, data->npages);
-	nfs_direct_release_pages(data->pagevec, data->npages);
-
 	spin_lock(&dreq->lock);
 
 	if (likely(task->tk_status >= 0))
@@ -254,10 +302,13 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	spin_unlock(&dreq->lock);
+	if (--dreq->outstanding) {
+		spin_unlock(&dreq->lock);
+		return;
+	}
 
-	if (put_dreq(dreq))
-		nfs_direct_complete(dreq);
+	spin_unlock(&dreq->lock);
+	nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -266,60 +317,41 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 };
 
 /*
- * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
- * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
- * bail and stop sending more reads.  Read length accounting is
- * handled automatically by nfs_direct_read_result().  Otherwise, if
- * no requests have been sent, just return an error.
+ * For each nfs_read_data struct that was allocated on the list, dispatch
+ * an NFS READ operation
 */
-static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
+static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
+	struct list_head *list = &dreq->list;
+	struct page **pages = dreq->pages;
+	size_t count = dreq->user_count;
+	loff_t pos = dreq->pos;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int rpages = nfs_max_pages(rsize);
-	unsigned int pgbase;
-	int result;
-	ssize_t started = 0;
-
-	get_dreq(dreq);
+	unsigned int curpage, pgbase;
 
-	pgbase = user_addr & ~PAGE_MASK;
+	curpage = 0;
+	pgbase = dreq->user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
-		result = -ENOMEM;
-		data = nfs_readdata_alloc(rpages);
-		if (unlikely(!data))
-			break;
-
 		bytes = rsize;
 		if (count < rsize)
 			bytes = count;
 
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 1, 0, data->pagevec, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (unlikely(result < data->npages)) {
-			if (result > 0)
-				nfs_direct_release_pages(data->pagevec, result);
-			nfs_readdata_release(data);
-			break;
-		}
-
-		get_dreq(dreq);
+		BUG_ON(list_empty(list));
+		data = list_entry(list->next, struct nfs_read_data, pages);
+		list_del_init(&data->pages);
 
-		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = data->pagevec;
+		data->args.pages = &pages[curpage];
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.eof = 0;
@@ -342,35 +374,33 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 				bytes,
 				(unsigned long long)data->args.offset);
 
-		started += bytes;
-		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
+		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-
-	if (put_dreq(dreq))
-		nfs_direct_complete(dreq);
-
-	if (started)
-		return 0;
-	return result < 0 ? (ssize_t) result : -EFAULT;
+	BUG_ON(!list_empty(list));
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
 {
-	ssize_t result = 0;
+	ssize_t result;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
-	dreq = nfs_direct_req_alloc();
+	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
 	if (!dreq)
 		return -ENOMEM;
 
+	dreq->user_addr = user_addr;
+	dreq->user_count = count;
+	dreq->pos = pos;
+	dreq->pages = pages;
+	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -378,9 +408,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
-	if (!result)
-		result = nfs_direct_wait(dreq);
+	nfs_direct_read_schedule(dreq);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -388,10 +417,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	while (!list_empty(&dreq->rewrite_list)) {
-		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
+	list_splice_init(&dreq->rewrite_list, &dreq->list);
+	while (!list_empty(&dreq->list)) {
+		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_direct_release_pages(data->pagevec, data->npages);
 		nfs_writedata_release(data);
 	}
 }
@@ -399,51 +428,14 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct inode *inode = dreq->inode;
-	struct list_head *p;
-	struct nfs_write_data *data;
+	struct list_head *pos;
 
+	list_splice_init(&dreq->rewrite_list, &dreq->list);
+	list_for_each(pos, &dreq->list)
+		dreq->outstanding++;
 	dreq->count = 0;
-	get_dreq(dreq);
-
-	list_for_each(p, &dreq->rewrite_list) {
-		data = list_entry(p, struct nfs_write_data, pages);
-
-		get_dreq(dreq);
-
-		/*
-		 * Reset data->res.
-		 */
-		nfs_fattr_init(&data->fattr);
-		data->res.count = data->args.count;
-		memset(&data->verf, 0, sizeof(data->verf));
-
-		/*
-		 * Reuse data->task; data->args should not have changed
-		 * since the original request was sent.
-		 */
-		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
-				&nfs_write_direct_ops, data);
-		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
-
-		data->task.tk_priority = RPC_PRIORITY_NORMAL;
-		data->task.tk_cookie = (unsigned long) inode;
-
-		/*
-		 * We're called via an RPC callback, so BKL is already held.
-		 */
-		rpc_execute(&data->task);
-
-		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
-				data->task.tk_pid,
-				inode->i_sb->s_id,
-				(long long)NFS_FILEID(inode),
-				data->args.count,
-				(unsigned long long)data->args.offset);
-	}
 
-	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, inode);
+	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
 }
 
 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -480,8 +472,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->cred = dreq->ctx->cred;
 
 	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = 0;
-	data->args.count = 0;
+	data->args.offset = dreq->pos;
+	data->args.count = dreq->user_count;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -543,6 +535,47 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
+static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
+{
+	struct list_head *list;
+	struct nfs_direct_req *dreq;
+	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	dreq = nfs_direct_req_alloc();
+	if (!dreq)
+		return NULL;
+
+	list = &dreq->list;
+	for(;;) {
+		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
+
+		if (unlikely(!data)) {
+			while (!list_empty(list)) {
+				data = list_entry(list->next,
+						  struct nfs_write_data, pages);
+				list_del(&data->pages);
+				nfs_writedata_free(data);
+			}
+			kref_put(&dreq->kref, nfs_direct_req_release);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&data->pages);
+		list_add(&data->pages, list);
+
+		data->req = (struct nfs_page *) dreq;
+		dreq->outstanding++;
+		if (nbytes <= wsize)
+			break;
+		nbytes -= wsize;
+	}
+
+	nfs_alloc_commit_data(dreq);
+
+	kref_get(&dreq->kref);
+	return dreq;
+}
+
 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
@@ -572,6 +605,8 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 			}
 		}
 	}
+	/* In case we have to resend */
+	data->args.stable = NFS_FILE_SYNC;
 
 	spin_unlock(&dreq->lock);
 }
@@ -585,8 +620,14 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, data->inode);
+	spin_lock(&dreq->lock);
+	if (--dreq->outstanding) {
+		spin_unlock(&dreq->lock);
+		return;
+	}
+	spin_unlock(&dreq->lock);
+
+	nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -595,62 +636,41 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 };
 
 /*
- * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
- * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
- * bail and stop sending more writes.  Write length accounting is
- * handled automatically by nfs_direct_write_result().  Otherwise, if
- * no requests have been sent, just return an error.
+ * For each nfs_write_data struct that was allocated on the list, dispatch
+ * an NFS WRITE operation
 */
-static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
+static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
+	struct list_head *list = &dreq->list;
+	struct page **pages = dreq->pages;
+	size_t count = dreq->user_count;
+	loff_t pos = dreq->pos;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int wpages = nfs_max_pages(wsize);
-	unsigned int pgbase;
-	int result;
-	ssize_t started = 0;
+	unsigned int curpage, pgbase;
 
-	get_dreq(dreq);
-
-	pgbase = user_addr & ~PAGE_MASK;
+	curpage = 0;
+	pgbase = dreq->user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
-		result = -ENOMEM;
-		data = nfs_writedata_alloc(wpages);
-		if (unlikely(!data))
-			break;
-
 		bytes = wsize;
 		if (count < wsize)
 			bytes = count;
 
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 0, 0, data->pagevec, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (unlikely(result < data->npages)) {
-			if (result > 0)
-				nfs_direct_release_pages(data->pagevec, result);
-			nfs_writedata_release(data);
-			break;
-		}
-
-		get_dreq(dreq);
-
+		BUG_ON(list_empty(list));
+		data = list_entry(list->next, struct nfs_write_data, pages);
 		list_move_tail(&data->pages, &dreq->rewrite_list);
 
-		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = data->pagevec;
+		data->args.pages = &pages[curpage];
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.count = bytes;
@@ -674,26 +694,19 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 				bytes,
 				(unsigned long long)data->args.offset);
 
-		started += bytes;
-		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
+		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-
-	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, inode);
-
-	if (started)
-		return 0;
-	return result < 0 ? (ssize_t) result : -EFAULT;
+	BUG_ON(!list_empty(list));
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
 {
-	ssize_t result = 0;
+	ssize_t result;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -701,14 +714,17 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	int sync = 0;
 
-	dreq = nfs_direct_req_alloc();
+	dreq = nfs_direct_write_alloc(count, wsize);
 	if (!dreq)
 		return -ENOMEM;
-	nfs_alloc_commit_data(dreq);
-
 	if (dreq->commit_data == NULL || count < wsize)
 		sync = FLUSH_STABLE;
 
+	dreq->user_addr = user_addr;
+	dreq->user_count = count;
+	dreq->pos = pos;
+	dreq->pages = pages;
+	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -719,9 +735,8 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	nfs_begin_data_update(inode);
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
-	if (!result)
-		result = nfs_direct_wait(dreq);
+	nfs_direct_write_schedule(dreq, sync);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -751,6 +766,8 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
+	int page_count;
+	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -772,7 +789,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
+	retval = nfs_get_user_pages(READ, (unsigned long) buf,
+						count, &pages);
+	if (retval < 0)
+		goto out;
+	page_count = retval;
+
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
+						pages, page_count);
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
@@ -808,6 +832,8 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
+	int page_count;
+	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -835,7 +861,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
+	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
+						count, &pages);
+	if (retval < 0)
+		goto out;
+	page_count = retval;
+
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
+						pos, pages, page_count);
 
 	/*
 	 * XXX: nfs_end_data_update() already ensures this file's
```
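
Two patterns in this merge are worth calling out. First, the user buffer is now pinned once up front: nfs_get_user_pages() computes how many pages the buffer spans and pins them all before any RPC is dispatched. The page-count arithmetic is easy to misread, so here is a standalone userspace sketch of just that calculation; the 4 KiB page size and the count_pages() helper name are illustrative, not kernel code:

```c
/*
 * Sketch of the page-count arithmetic used by nfs_get_user_pages():
 * a buffer of `size` bytes starting at `user_addr` spans every page
 * from the one holding its first byte through the one holding its
 * last.  PAGE_SHIFT/PAGE_SIZE are illustrative (4 KiB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static unsigned long count_pages(unsigned long user_addr, unsigned long size)
{
	/* index one past the page holding the buffer's last byte... */
	unsigned long end = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* ...minus the index of the page holding its first byte */
	return end - (user_addr >> PAGE_SHIFT);
}

int main(void)
{
	/* an 8 KiB buffer starting 100 bytes into a page touches 3 pages */
	printf("%lu\n", count_pages(0x1000 + 100, 8192));	/* prints 3 */
	return 0;
}
```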
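Second, completion accounting: the merge drops the atomic get_dreq()/put_dreq() reference pair in favor of an `outstanding` counter that is fixed while the requests are allocated and decremented under dreq->lock, with the RPC callback that drops it to zero performing the completion. A minimal pthreads sketch of that pattern, with a mutex and condition variable standing in for the kernel's spinlock and struct completion, and hypothetical io_complete()/io_wait() names (compile with -lpthread):

```c
/*
 * Standalone sketch (not kernel code) of the completion-accounting
 * pattern this merge adopts: the number of outstanding I/Os is fixed
 * up front, each completion decrements it under a lock, and the one
 * that reaches zero wakes the waiter.
 */
#include <pthread.h>
#include <stdio.h>

struct direct_req {
	pthread_mutex_t lock;	/* protects outstanding and count */
	pthread_cond_t done;	/* waiter blocks here */
	int outstanding;	/* i/os we're still waiting for */
	long count;		/* bytes processed so far */
};

/* Called once per completed chunk, possibly from many threads. */
static void io_complete(struct direct_req *dreq, long bytes)
{
	pthread_mutex_lock(&dreq->lock);
	dreq->count += bytes;
	if (--dreq->outstanding == 0)
		pthread_cond_signal(&dreq->done);	/* last one wakes the waiter */
	pthread_mutex_unlock(&dreq->lock);
}

/* The issuing thread waits until every dispatched chunk has completed. */
static long io_wait(struct direct_req *dreq)
{
	pthread_mutex_lock(&dreq->lock);
	while (dreq->outstanding > 0)
		pthread_cond_wait(&dreq->done, &dreq->lock);
	pthread_mutex_unlock(&dreq->lock);
	return dreq->count;
}

static void *worker(void *arg)
{
	io_complete((struct direct_req *)arg, 4096);
	return NULL;
}

int main(void)
{
	struct direct_req dreq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.outstanding = 4,	/* fixed before any i/o is "dispatched" */
	};
	pthread_t tid[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, worker, &dreq);
	printf("%ld bytes done\n", io_wait(&dreq));	/* prints 16384 */
	for (int i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
```

Folding the decrement into the same lock that guards `count` and `error` means the waiter can never observe a partially updated total; the comment above nfs_direct_read_alloc() in the diff makes the same point about fixing the count before any I/O is dispatched.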