-rw-r--r--   arch/x86/include/asm/uaccess_32.h   |  4
-rw-r--r--   arch/x86/include/asm/uaccess_64.h   | 25
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c     |  2
-rw-r--r--   include/linux/uaccess.h             |  4
-rw-r--r--   mm/filemap.c                        | 11
-rw-r--r--   mm/filemap_xip.c                    |  2
6 files changed, 17 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index a0ba61386972..5e06259e90e5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
}
static __always_inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n, unsigned long total)
+ const void __user *from, unsigned long n)
{
might_fault();
if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
- unsigned long n, unsigned long total)
+ unsigned long n)
{
return __copy_from_user_ll_nocache_nozero(to, from, n);
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index dcaa0404cf7b..8cc687326eb8 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -188,29 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest);
-static inline int __copy_from_user_nocache(void *dst, const void __user *src,
- unsigned size, unsigned long total)
+static inline int
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
might_sleep();
- /*
- * In practice this limit means that large file write()s
- * which get chunked to 4K copies get handled via
- * non-temporal stores here. Smaller writes get handled
- * via regular __copy_from_user():
- */
- if (likely(total >= PAGE_SIZE))
- return __copy_user_nocache(dst, src, size, 1);
- else
- return __copy_from_user(dst, src, size);
+ return __copy_user_nocache(dst, src, size, 1);
}
-static inline int __copy_from_user_inatomic_nocache(void *dst,
- const void __user *src, unsigned size, unsigned total)
+static inline int
+__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
{
- if (likely(total >= PAGE_SIZE))
- return __copy_user_nocache(dst, src, size, 0);
- else
- return __copy_from_user_inatomic(dst, src, size);
+ return __copy_user_nocache(dst, src, size, 0);
}
unsigned long
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b209db8370d..818576654092 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping,
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
- user_data, length, length);
+ user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
if (unwritten)
return -EFAULT;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6f3c603b0d67..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n, unsigned long total)
+ const void __user *from, unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
static inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n, unsigned long total)
+ const void __user *from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 60fd56772cc6..126d3973b3d1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
- size_t copied = 0, left = 0, total = bytes;
+ size_t copied = 0, left = 0;
while (bytes) {
char __user *buf = iov->iov_base + base;
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
@@ -1851,9 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
-
- left = __copy_from_user_inatomic_nocache(kaddr + offset,
- buf, bytes, bytes);
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1881,8 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
-
- left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
+ left = __copy_from_user(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index bf54f8a2cf1d..0c04615651b7 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
break;
copied = bytes -
- __copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
+ __copy_from_user_nocache(xip_mem + offset, buf, bytes);
if (likely(copied > 0)) {
status = copied;
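
For reference, a minimal sketch (not part of the patch) of what a caller looks like after this change: __copy_from_user_nocache() now takes only the destination, the user-space source and the length, and, like __copy_from_user(), returns the number of bytes it could not copy. The helper name example_copy_chunk is hypothetical; it simply mirrors the __xip_file_write() hunk above.

#include <linux/uaccess.h>

/* Hypothetical helper illustrating the new three-argument API. */
static ssize_t example_copy_chunk(void *kaddr, const char __user *buf,
				  size_t bytes)
{
	size_t left;

	/* Non-temporal copy from user space; returns bytes NOT copied. */
	left = __copy_from_user_nocache(kaddr, buf, bytes);
	if (left == bytes)
		return -EFAULT;		/* nothing could be copied */

	return bytes - left;		/* number of bytes actually copied */
}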