Diffstat (limited to 'fs')
-rw-r--r--   fs/afs/internal.h              2
-rw-r--r--   fs/nfs/direct.c               12
-rw-r--r--   fs/ntfs/inode.c                2
-rw-r--r--   fs/ramfs/file-nommu.c          2
-rw-r--r--   fs/xfs/linux-2.6/xfs_aops.c   26
5 files changed, 30 insertions, 14 deletions
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 2dac3ad2c44b..2c55dd94a1de 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -17,6 +17,8 @@
#include <linux/rxrpc.h>
#include <linux/key.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
+
#include "afs.h"
#include "afs_vl.h"
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0c542ec92d5b..00eee87510fe 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -168,7 +168,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
return dreq;
}
-static void nfs_direct_req_release(struct kref *kref)
+static void nfs_direct_req_free(struct kref *kref)
{
struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
@@ -177,6 +177,11 @@ static void nfs_direct_req_release(struct kref *kref)
kmem_cache_free(nfs_direct_cachep, dreq);
}
+static void nfs_direct_req_release(struct nfs_direct_req *dreq)
+{
+ kref_put(&dreq->kref, nfs_direct_req_free);
+}
+
/*
* Collects and returns the final error value/byte-count.
*/
@@ -196,7 +201,6 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
result = dreq->count;
out:
- kref_put(&dreq->kref, nfs_direct_req_release);
return (ssize_t) result;
}
@@ -214,7 +218,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
}
complete_all(&dreq->completion);
- kref_put(&dreq->kref, nfs_direct_req_release);
+ nfs_direct_req_release(dreq);
}
/*
@@ -369,6 +373,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
+ nfs_direct_req_release(dreq);
return result;
}
@@ -716,6 +721,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
+ nfs_direct_req_release(dreq);
return result;
}
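
The nfs/direct.c hunks above move the final kref_put() out of nfs_direct_wait() and into the callers: the submitting path now keeps its own reference on the request until it has finished waiting, while the completion path drops only the reference it owns, so the waiter can never see the request freed underneath it. Below is a minimal standalone sketch of that ownership pattern, using plain C11 atomics and hypothetical names rather than the kernel's struct kref:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int refcount;			/* stand-in for the dreq kref */
};

static struct req *req_alloc(void)
{
	struct req *r = malloc(sizeof(*r));
	if (r)
		atomic_init(&r->refcount, 1);	/* reference owned by the submitter */
	return r;
}

static void req_release(struct req *r)		/* plays the role of nfs_direct_req_release() */
{
	if (atomic_fetch_sub(&r->refcount, 1) == 1)
		free(r);			/* plays the role of nfs_direct_req_free() */
}

static void req_complete(struct req *r)		/* plays the role of nfs_direct_complete() */
{
	/* ...wake any waiter first... */
	req_release(r);				/* drop only the I/O's own reference */
}

int main(void)
{
	struct req *r = req_alloc();
	if (!r)
		return 1;
	atomic_fetch_add(&r->refcount, 1);	/* extra reference for the in-flight I/O */
	req_complete(r);			/* I/O finishes; r is still valid for the submitter */
	/* the wait (nfs_direct_wait in the patch) would happen here */
	req_release(r);				/* submitter drops the last reference after waiting */
	return 0;
}

The point mirrored from the patch is that the release helper is called twice, once by the completion side and once by the submitter after the wait, so whichever runs last does the freeing.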
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 074791ce4ab2..b532a730cec2 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -140,7 +140,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
- ni->name[i] = 0;
+ ni->name[na->name_len] = 0;
}
return 0;
}
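
Judging from the memcpy() just above, i in this function is the name length in bytes rather than in ntfschar elements, so ni->name[i] planted the terminator past the end of the allocation; indexing by na->name_len puts it directly after the copied characters. A small illustration of the byte-count versus element-count distinction, with hypothetical types and names rather than the NTFS ones:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint16_t wchar16;	/* hypothetical stand-in for ntfschar */

static wchar16 *copy_name(const wchar16 *src, unsigned int name_len)
{
	unsigned int bytes = name_len * sizeof(wchar16);	/* plays the role of 'i' */
	wchar16 *dst = malloc(bytes + sizeof(wchar16));		/* room for the terminator */

	if (!dst)
		return NULL;
	memcpy(dst, src, bytes);	/* a byte count is what memcpy wants */
	dst[name_len] = 0;		/* element index; dst[bytes] would overrun the buffer */
	return dst;
}

int main(void)
{
	const wchar16 name[] = { 'a', 'b', 'c' };
	free(copy_name(name, 3));
	return 0;
}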
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3b481d557edb..9345a46ffb32 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -179,7 +179,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
return ret;
}
- ret = vmtruncate(inode, size);
+ ret = vmtruncate(inode, newsize);
return ret;
}
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4475588e973a..7361861e3aac 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -701,7 +701,7 @@ xfs_is_delayed_page(
else if (buffer_delay(bh))
acceptable = (type == IOMAP_DELAY);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == 0);
+ acceptable = (type == IOMAP_NEW);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -810,7 +810,7 @@ xfs_convert_page(
page_dirty--;
count++;
} else {
- type = 0;
+ type = IOMAP_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
@@ -968,8 +968,8 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
offset = page_offset(page);
- flags = -1;
- type = IOMAP_READ;
+ flags = BMAPI_READ;
+ type = IOMAP_NEW;
/* TODO: cleanup count and page_dirty */
@@ -999,14 +999,14 @@ xfs_page_state_convert(
*
* Third case, an unmapped buffer was found, and we are
* in a path where we need to write the whole page out.
- */
+ */
if (buffer_unwritten(bh) || buffer_delay(bh) ||
((buffer_uptodate(bh) || PageUptodate(page)) &&
!buffer_mapped(bh) && (unmapped || startio))) {
- /*
+ /*
* Make sure we don't use a read-only iomap
*/
- if (flags == BMAPI_READ)
+ if (flags == BMAPI_READ)
iomap_valid = 0;
if (buffer_unwritten(bh)) {
@@ -1055,7 +1055,7 @@ xfs_page_state_convert(
* That means it must already have extents allocated
* underneath it. Map the extent by reading it.
*/
- if (!iomap_valid || type != IOMAP_READ) {
+ if (!iomap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
size = xfs_probe_cluster(inode, page, bh,
head, 1);
@@ -1066,7 +1066,15 @@ xfs_page_state_convert(
iomap_valid = xfs_iomap_valid(&iomap, offset);
}
- type = IOMAP_READ;
+ /*
+ * We set the type to IOMAP_NEW in case we are doing a
+ * small write at EOF that is extending the file but
+ * without needing an allocation. We need to update the
+ * file size on I/O completion in this case so it is
+ * the same case as having just allocated a new extent
+ * that we are writing into for the first time.
+ */
+ type = IOMAP_NEW;
if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
ASSERT(buffer_mapped(bh));
if (iomap_valid)
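
The xfs_aops.c hunks replace the bare 0 previously used for the iomap type with the named IOMAP_NEW state, distinguish read-only mappings by checking flags against BMAPI_READ instead of overloading the type, and add the comment explaining that a small EOF-extending write must be treated like a freshly allocated extent so the file size is updated at I/O completion. A tiny sketch of the readability point, using a hypothetical enum rather than the real XFS definitions:

#include <stdbool.h>
#include <stdio.h>

enum iomap_type {		/* hypothetical stand-in for the XFS iomap types */
	MY_IOMAP_NEW,		/* newly mapped, or a size-extending write */
	MY_IOMAP_DELAY,		/* delayed-allocation buffer */
	MY_IOMAP_UNWRITTEN,	/* buffer over an unwritten extent */
};

/* Each branch names the state it expects; the old comparison against a
 * literal 0 left the reader to work out which state that value stood for. */
static bool buffer_type_acceptable(enum iomap_type type,
				   bool unwritten, bool delayed)
{
	if (unwritten)
		return type == MY_IOMAP_UNWRITTEN;
	if (delayed)
		return type == MY_IOMAP_DELAY;
	return type == MY_IOMAP_NEW;
}

int main(void)
{
	/* A plain dirty, mapped buffer should match the IOMAP_NEW state. */
	printf("%d\n", buffer_type_acceptable(MY_IOMAP_NEW, false, false));
	return 0;
}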