-rw-r--r--  fs/ntfs/file.c              26
-rw-r--r--  include/asm-i386/uaccess.h   6
-rw-r--r--  mm/filemap.c                 8
-rw-r--r--  mm/filemap.h                26
4 files changed, 40 insertions, 26 deletions
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 88292f9e4b9b..2e42c2dcae12 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1358,7 +1358,7 @@ err_out:
goto out;
}
-static size_t __ntfs_copy_from_user_iovec(char *vaddr,
+static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
size_t total = 0;
@@ -1376,10 +1376,6 @@ static size_t __ntfs_copy_from_user_iovec(char *vaddr,
bytes -= len;
vaddr += len;
if (unlikely(left)) {
- /*
- * Zero the rest of the target like __copy_from_user().
- */
- memset(vaddr, 0, bytes);
total -= left;
break;
}
@@ -1420,11 +1416,13 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
* pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
* single-segment behaviour.
*
- * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
- * when not atomic. This is ok because __ntfs_copy_from_user_iovec() calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep(). And on many
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
+ * when atomic and when not atomic. This is ok because
+ * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
+ * and it is safe to call it in non-atomic context.
+ * In fact, the only difference between __copy_from_user_inatomic() and
+ * __copy_from_user() is that the latter calls might_sleep() and the former
+ * should not zero the tail of the buffer on error. And on many
* architectures __copy_from_user_inatomic() is just defined to
* __copy_from_user() so it makes no difference at all on those architectures.
*/
@@ -1441,14 +1439,18 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
if (len > bytes)
len = bytes;
kaddr = kmap_atomic(*pages, KM_USER0);
- copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+ copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
*iov, *iov_ofs, len);
kunmap_atomic(kaddr, KM_USER0);
if (unlikely(copied != len)) {
/* Do it the slow way. */
kaddr = kmap(*pages);
- copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+ copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
*iov, *iov_ofs, len);
+ /*
+ * Zero the rest of the target like __copy_from_user().
+ */
+ memset(kaddr + ofs + copied, 0, len - copied);
kunmap(*pages);
if (unlikely(copied != len))
goto err_out;
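
For reference, a minimal sketch (not the kernel's exact code; the name is
abbreviated here) of what the renamed helper does after this change: walk
the iovec array, copy each user segment with __copy_from_user_inatomic(),
and stop at the first fault, leaving the tail untouched for the caller to
deal with:

static size_t sketch_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf =
			(const char __user *)iov->iov_base + iov_ofs;
		size_t len = min(bytes, iov->iov_len - iov_ofs);
		size_t left;

		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;	/* count only the bytes that made it */
			break;
		}
		if (!bytes)
			break;
		iov++;		/* advance to the next user segment */
		iov_ofs = 0;
	}
	return total;
}

Note that no memset() appears here: zeroing the tail is now done by the
caller, and only on the non-atomic slow path.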
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 8462f8e0e658..d0d253277be5 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -458,6 +458,12 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep. In this case the
+ * uncopied bytes will *NOT* be padded with zeros. See mm/filemap.h
+ * for an explanation of why this is needed.
+ * FIXME this isn't implemented yet EMXIF
*/
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
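
To make the contract above concrete, here is an illustrative sketch only,
written under the assumption that the FIXME is eventually implemented
(i.e. the inatomic variant really does skip the zero-padding); the
sleeping variant is then just the atomic one plus might_sleep() and
zero-fill of the uncopied tail:

static __always_inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	might_sleep();
	left = __copy_from_user_inatomic(to, from, n);
	if (unlikely(left)) {
		/* pad the uncopied tail, as the sleeping variant promises */
		memset((char *)to + (n - left), 0, left);
	}
	return left;
}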
diff --git a/mm/filemap.c b/mm/filemap.c
index 807a463fd5ed..1ed4be2a7654 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1892,7 +1892,7 @@ int remove_suid(struct dentry *dentry)
EXPORT_SYMBOL(remove_suid);
size_t
-__filemap_copy_from_user_iovec(char *vaddr,
+__filemap_copy_from_user_iovec_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
size_t copied = 0, left = 0;
@@ -1908,12 +1908,8 @@ __filemap_copy_from_user_iovec(char *vaddr,
vaddr += copy;
iov++;
- if (unlikely(left)) {
- /* zero the rest of the target like __copy_from_user */
- if (bytes)
- memset(vaddr, 0, bytes);
+ if (unlikely(left))
break;
- }
}
return copied - left;
}
diff --git a/mm/filemap.h b/mm/filemap.h
index 5683cde22055..536979fb4ba7 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -16,15 +16,23 @@
#include <linux/uaccess.h>
size_t
-__filemap_copy_from_user_iovec(char *vaddr,
- const struct iovec *iov,
- size_t base,
- size_t bytes);
+__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+ const struct iovec *iov,
+ size_t base,
+ size_t bytes);
/*
* Copy as much as we can into the page and return the number of bytes which
 * were successfully copied. If a fault is encountered then clear the page
* out to (offset+bytes) and return the number of bytes which were copied.
+ *
+ * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
+ * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
+ * and if the following non-atomic copy succeeds, then there is a small window
+ * where the target page contains neither the data before the write, nor the
+ * data after the write (it contains zero). A read at this time will see
+ * data that is inconsistent with any ordering of the read and the write.
+ * (This has been detected in practice).
*/
static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
@@ -60,13 +68,15 @@ filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
size_t copied;
kaddr = kmap_atomic(page, KM_USER0);
- copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
- base, bytes);
+ copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
+ base, bytes);
kunmap_atomic(kaddr, KM_USER0);
if (copied != bytes) {
kaddr = kmap(page);
- copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
- base, bytes);
+ copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
+ base, bytes);
+ if (bytes - copied)
+ memset(kaddr + offset + copied, 0, bytes - copied);
kunmap(page);
}
return copied;
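
Putting the pieces together, both call sites now follow the same two-phase
pattern; a condensed sketch (paraphrasing the hunks above, not verbatim):

	kaddr = kmap_atomic(page, KM_USER0);	/* fast path: no sleeping */
	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
			iov, base, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (copied != bytes) {
		kaddr = kmap(page);	/* slow path: may fault and sleep */
		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
				iov, base, bytes);
		/*
		 * Only now, after the final attempt, zero the uncopied
		 * tail. Zeroing in the atomic pass would let a concurrent
		 * reader see zeros that the retry was about to overwrite
		 * with the real data.
		 */
		if (bytes - copied)
			memset(kaddr + offset + copied, 0, bytes - copied);
		kunmap(page);
	}
	return copied;

With this ordering the memset() can no longer race with a successful
retry: by the time it runs, either the data is in place or the copy has
genuinely failed, so any zeros a reader observes correspond to a real
short write.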