author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-26 16:57:16 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-26 16:57:16 -0700 |
commit | 1e2aec873ad6d16538512dbb96853caa1fa076af (patch) | |
tree | d792b19ac47be44debd24610ae27f1330fa490e4 /lib/strncpy_from_user.c | |
parent | ae32adc1e06d096399f195eeda12d443d53539c4 (diff) | |
parent | 2c66f623631709aa5f2e4c14c7e089682e7394a3 (diff) | |
download | blackbird-op-linux-1e2aec873ad6d16538512dbb96853caa1fa076af.tar.gz blackbird-op-linux-1e2aec873ad6d16538512dbb96853caa1fa076af.zip |
Merge branch 'generic-string-functions'
This makes <asm/word-at-a-time.h> actually live up to its promise of
allowing architectures to help tune the string functions that do their
work a word at a time.
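To illustrate what "a word at a time" buys here: one arithmetic expression can test every byte of a machine word for NUL at once, so string routines can process sizeof(long) bytes per loop iteration instead of one. A minimal standalone sketch of that test (plain userspace C using the classic 0x01/0x80 mask variant, assuming a 64-bit little-endian machine and a GCC-style builtin; an illustration, not kernel code):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
        const unsigned long ones = 0x0101010101010101UL; /* 0x01 in every byte */
        const unsigned long high = 0x8080808080808080UL; /* 0x80 in every byte */
        unsigned long word;

        /* Load 8 bytes that contain a NUL in byte 3. */
        memcpy(&word, "abc\0defg", sizeof(word));

        /*
         * (word - ones) & ~word & high is non-zero exactly when some byte of
         * 'word' is 0x00; on little-endian the lowest set 0x80 bit marks the
         * first zero byte, so its bit index / 8 is the byte offset of the NUL.
         */
        unsigned long mask = (word - ones) & ~word & high;

        if (mask)
                printf("NUL at byte offset %d\n", __builtin_ctzl(mask) >> 3); /* 3 */
        else
                printf("no NUL in this word\n");
        return 0;
}
```

The find_zero() helper being removed in the diff below and the generic create_zero_mask()/find_zero() pair that replaces it are just two ways of turning such a mask back into a byte offset.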
David had already taken the x86 strncpy_from_user() function, modified
it to work on sparc, and then done the extra work to make it generically
useful. This then expands on that work by making x86 use that generic
version, completing the circle.
But more importantly, it fixes up the word-at-a-time interfaces so that
it's now easy to also support things like strnlen_user(), and pretty
much any other string function.
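To make that concrete, here is a hedged sketch of a word-at-a-time strnlen() in plain userspace C (the helper name wordwise_strnlen is hypothetical; a 64-bit little-endian machine is assumed). It reuses the zero-byte test shown earlier; the real strnlen_user() additionally has to deal with page faults and user-space access rules, which this deliberately ignores:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical sketch: bounded strlen, one word per iteration where possible. */
static size_t wordwise_strnlen(const char *s, size_t maxlen)
{
        const unsigned long ones = 0x0101010101010101UL;
        const unsigned long high = 0x8080808080808080UL;
        size_t res = 0;

        while (maxlen - res >= sizeof(unsigned long)) {
                unsigned long c, mask;

                memcpy(&c, s + res, sizeof(c));  /* unaligned-safe load */
                mask = (c - ones) & ~c & high;   /* zero-byte test as above */
                if (mask)
                        return res + (__builtin_ctzl(mask) >> 3);
                res += sizeof(unsigned long);
        }
        while (res < maxlen && s[res])           /* byte-at-a-time tail */
                res++;
        return res;
}

int main(void)
{
        char buf[64] = "hello, world";           /* zero-padded buffer */

        printf("%zu\n", wordwise_strnlen(buf, sizeof(buf))); /* 12 */
        printf("%zu\n", wordwise_strnlen(buf, 5));           /* 5  */
        return 0;
}
```

The shape of this loop — whole words until one of them contains a NUL, then a byte-at-a-time tail — is the same shape as the do_strncpy_from_user() loop in the diff below.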
David reports that it all works fine on sparc, and Jonas Bonn reported
that an earlier version of this worked on OpenRISC too. It's pretty
easy for architectures to add support for this and just replace their
private versions with the generic code.
* generic-string-functions:
  sparc: use the new generic strnlen_user() function
  x86: use the new generic strnlen_user() function
  lib: add generic strnlen_user() function
  word-at-a-time: make the interfaces truly generic
  x86: use generic strncpy_from_user routine
Diffstat (limited to 'lib/strncpy_from_user.c')
-rw-r--r-- | lib/strncpy_from_user.c | 47 |
1 file changed, 7 insertions, 40 deletions
```diff
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index c4c09b0e96ba..bb2b201d6ad0 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -4,37 +4,7 @@
 #include <linux/errno.h>
 
 #include <asm/byteorder.h>
-
-static inline long find_zero(unsigned long mask)
-{
-        long byte = 0;
-
-#ifdef __BIG_ENDIAN
-#ifdef CONFIG_64BIT
-        if (mask >> 32)
-                mask >>= 32;
-        else
-                byte = 4;
-#endif
-        if (mask >> 16)
-                mask >>= 16;
-        else
-                byte += 2;
-        return (mask >> 8) ? byte : byte + 1;
-#else
-#ifdef CONFIG_64BIT
-        if (!((unsigned int) mask)) {
-                mask >>= 32;
-                byte = 4;
-        }
-#endif
-        if (!(mask & 0xffff)) {
-                mask >>= 16;
-                byte += 2;
-        }
-        return (mask & 0xff) ? byte : byte + 1;
-#endif
-}
+#include <asm/word-at-a-time.h>
 
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #define IS_UNALIGNED(src, dst) 0
@@ -51,8 +21,7 @@ static inline long find_zero(unsigned long mask)
  */
 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
 {
-        const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1;
-        const unsigned long low_bits = REPEAT_BYTE(0x7f);
+        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
         long res = 0;
 
         /*
@@ -66,18 +35,16 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
                 goto byte_at_a_time;
 
         while (max >= sizeof(unsigned long)) {
-                unsigned long c, v, rhs;
+                unsigned long c, data;
 
                 /* Fall back to byte-at-a-time if we get a page fault */
                 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
                         break;
-                rhs = c | low_bits;
-                v = (c + high_bits) & ~rhs;
                 *(unsigned long *)(dst+res) = c;
-                if (v) {
-                        v = (c & low_bits) + low_bits;
-                        v = ~(v | rhs);
-                        return res + find_zero(v);
+                if (has_zero(c, &data, &constants)) {
+                        data = prep_zero_mask(c, data, &constants);
+                        data = create_zero_mask(data);
+                        return res + find_zero(data);
                 }
                 res += sizeof(unsigned long);
                 max -= sizeof(unsigned long);
```
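The has_zero()/prep_zero_mask()/create_zero_mask()/find_zero() calls added above come from <asm/word-at-a-time.h>, which is not part of this diff. As a rough, hedged idea of how such an interface can be implemented for a 64-bit little-endian machine, here is a self-contained userspace sketch with the same call shapes as the patched loop; the helper bodies (and the simple byte-counting find_zero()) are assumptions for illustration, not the kernel's actual header:

```c
#include <stdio.h>
#include <string.h>

/* Sketch of a little-endian word-at-a-time helper set (not the kernel header). */
struct word_at_a_time {
        unsigned long one_bits, high_bits;        /* 0x01..01 and 0x80..80 */
};

#define WORD_AT_A_TIME_CONSTANTS \
        { 0x0101010101010101UL, 0x8080808080808080UL }

/* Non-zero iff 'a' contains a zero byte; stashes intermediate bits in *bits. */
static unsigned long has_zero(unsigned long a, unsigned long *bits,
                              const struct word_at_a_time *c)
{
        unsigned long mask = (a - c->one_bits) & ~a & c->high_bits;
        *bits = mask;
        return mask;
}

/* On little-endian the bits from has_zero() are already usable as-is. */
static unsigned long prep_zero_mask(unsigned long a, unsigned long bits,
                                    const struct word_at_a_time *c)
{
        (void)a; (void)c;
        return bits;
}

/* Expand the marker of the first zero byte into 0xff in every byte below it. */
static unsigned long create_zero_mask(unsigned long bits)
{
        return ((bits - 1) & ~bits) >> 7;
}

/* Count the bytes before the first zero byte, i.e. the string part of the word. */
static long find_zero(unsigned long mask)
{
        long byte = 0;

        while (mask & 0xff) {
                mask >>= 8;
                byte++;
        }
        return byte;
}

int main(void)
{
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long c, data;

        memcpy(&c, "linux\0\0\0", sizeof(c));     /* word with a NUL in byte 5 */
        if (has_zero(c, &data, &constants)) {
                data = prep_zero_mask(c, data, &constants);
                data = create_zero_mask(data);
                printf("NUL at byte %ld of the word\n", find_zero(data)); /* 5 */
        }
        return 0;
}
```

Splitting the work into has_zero()/prep_zero_mask()/create_zero_mask() is presumably what lets big-endian or architecture-tuned versions do the byte-order-dependent part differently while callers such as do_strncpy_from_user() stay generic.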