Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug          |  27
-rw-r--r-- | lib/Makefile               |   2
-rw-r--r-- | lib/bitmap.c               | 166
-rw-r--r-- | lib/extable.c              |   3
-rw-r--r-- | lib/idr.c                  |  35
-rw-r--r-- | lib/kobject.c              |   1
-rw-r--r-- | lib/smp_processor_id.c     |   1
-rw-r--r-- | lib/sort.c                 |   1
-rw-r--r-- | lib/string.c               | 125
-rw-r--r-- | lib/swiotlb.c              | 811
-rw-r--r-- | lib/vsprintf.c             |   1
-rw-r--r-- | lib/zlib_inflate/inflate.c |   1
12 files changed, 1084 insertions, 90 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 016e89a44ac8..156822e3cc79 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -128,7 +128,7 @@ config DEBUG_HIGHMEM config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED depends on BUG - depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV + depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV default !EMBEDDED help Say Y here to make BUG() panics output the file name and line number @@ -168,13 +168,34 @@ config DEBUG_FS If unsure, say N. +config DEBUG_VM + bool "Debug VM" + depends on DEBUG_KERNEL + help + Enable this to debug the virtual-memory system. + + If unsure, say N. + config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML) default y if DEBUG_INFO && UML help If you say Y here the resulting kernel image will be slightly larger - and slower, but it might give very useful debugging information - on some architectures or you use external debuggers. + and slower, but it might give very useful debugging information on + some architectures or if you use external debuggers. If you don't debug the kernel, you can say N. +config RCU_TORTURE_TEST + tristate "torture tests for RCU" + depends on DEBUG_KERNEL + default n + help + This option provides a kernel module that runs torture tests + on the RCU infrastructure. The kernel module may be built + after the fact on the running kernel to be tested, if desired. + + Say Y here if you want RCU torture tests to start automatically + at boot time (you probably don't). + Say M if you want the RCU torture tests to build as a module. + Say N if you are unsure.
diff --git a/lib/Makefile b/lib/Makefile index 44a46750690a..8535f4d7d1c3 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -44,6 +44,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o +obj-$(CONFIG_SWIOTLB) += swiotlb.o + hostprogs-y := gen_crc32table clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c index fb9371fdd44a..23d3b1147fe9 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -511,6 +511,172 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) } EXPORT_SYMBOL(bitmap_parselist); +/* + * bitmap_pos_to_ord(buf, pos, bits) + * @buf: pointer to a bitmap + * @pos: a bit position in @buf (0 <= @pos < @bits) + * @bits: number of valid bit positions in @buf + * + * Map the bit at position @pos in @buf (of length @bits) to the + * ordinal of which set bit it is. If it is not set or if @pos + * is not a valid bit position, map to zero (0). + * + * If for example, just bits 4 through 7 are set in @buf, then @pos + * values 4 through 7 will get mapped to 0 through 3, respectively, + * and other @pos values will get mapped to 0. When @pos value 7 + * gets mapped to (returns) @ord value 3 in this example, that means + * that bit 7 is the 3rd (starting with 0th) set bit in @buf. + * + * The bit positions 0 through @bits - 1 are valid positions in @buf.
+ */ +static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) +{ + int ord = 0; + + if (pos >= 0 && pos < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < pos; + i = find_next_bit(buf, bits, i + 1)) + ord++; + if (i > pos) + ord = 0; + } + return ord; +} + +/** + * bitmap_ord_to_pos(buf, ord, bits) + * @buf: pointer to bitmap + * @ord: ordinal bit position (n-th set bit, n >= 0) + * @bits: number of valid bit positions in @buf + * + * Map the ordinal offset of bit @ord in @buf to its position in @buf. + * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0). + * + * If for example, just bits 4 through 7 are set in @buf, then @ord + * values 0 through 3 will get mapped to 4 through 7, respectively, + * and all other @ord values will get mapped to 0. When @ord value 3 + * gets mapped to (returns) @pos value 7 in this example, that means + * that the 3rd set bit (starting with 0th) is at position 7 in @buf. + * + * The bit positions 0 through @bits - 1 are valid positions in @buf. + */ +static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) +{ + int pos = 0; + + if (ord >= 0 && ord < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < bits && ord > 0; + i = find_next_bit(buf, bits, i + 1)) + ord--; + if (i < bits && ord == 0) + pos = i; + } + + return pos; +} + +/** + * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap + * @src: subset to be remapped + * @dst: remapped result + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * If either of the @old and @new bitmaps is empty, or if @src and @dst + * point to the same location, then this routine does nothing. + * + * The positions of unset bits in @old are mapped to the position of + * the first set bit in @new. + * + * Apply the above specified mapping to @src, placing the result in + * @dst, clearing any bits previously set in @dst. + * + * The resulting value of @dst will have either the same weight as + * @src, or less weight in the general case that the mapping wasn't + * injective due to the weight of @new being less than that of @old. + * The resulting value of @dst will never have greater weight than + * that of @src, except perhaps in the case that one of the above + * conditions was not met and this routine just returned. + * + * For example, let's say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions to 12 (the first set bit in @new). So if say @src + * comes into this routine with bits 1, 5 and 7 set, then @dst should + * leave with bits 12, 13 and 15 set. + */ +void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, + int bits) +{ + int s; + + if (bitmap_weight(old, bits) == 0) + return; + if (bitmap_weight(new, bits) == 0) + return; + if (dst == src) /* following doesn't handle inplace remaps */ + return; + + bitmap_zero(dst, bits); + for (s = find_first_bit(src, bits); + s < bits; + s = find_next_bit(src, bits, s + 1)) { + int x = bitmap_pos_to_ord(old, s, bits); + int y = bitmap_ord_to_pos(new, x, bits); + set_bit(y, dst); + } +} +EXPORT_SYMBOL(bitmap_remap);
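The 4-through-7 to 12-through-15 example in the comment above is easy to check outside the kernel. The following is a minimal userspace sketch; pos_to_ord() and ord_to_pos() mirror the semantics of the static kernel helpers over a single 64-bit word, but are illustrative reimplementations, not the kernel code:

    #include <assert.h>
    #include <stdint.h>

    /* Ordinal of the set bit at pos: how many set bits precede it.
     * Like the kernel helper, an unset (or invalid) pos maps to 0. */
    static int pos_to_ord(uint64_t w, int pos)
    {
            if (pos < 0 || pos >= 64 || !((w >> pos) & 1))
                    return 0;
            return __builtin_popcountll(w & ((1ULL << pos) - 1));
    }

    /* Position of the ord-th set bit (counting from 0). */
    static int ord_to_pos(uint64_t w, int ord)
    {
            for (int i = 0; i < 64; i++)
                    if ((w >> i) & 1 && ord-- == 0)
                            return i;
            return 0;
    }

    int main(void)
    {
            uint64_t old = 0xf0;    /* bits 4..7   */
            uint64_t new = 0xf000;  /* bits 12..15 */
            uint64_t src = 1ULL << 1 | 1ULL << 5 | 1ULL << 7;
            uint64_t dst = 0;

            for (int s = 0; s < 64; s++)
                    if ((src >> s) & 1)
                            dst |= 1ULL << ord_to_pos(new, pos_to_ord(old, s));

            /* bit 1 is unset in old, so it maps to new's first set bit (12) */
            assert(dst == (1ULL << 12 | 1ULL << 13 | 1ULL << 15));
            return 0;
    }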
+ +/** + * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit + * @oldbit: bit position to be mapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * The positions of unset bits in @old are mapped to the position of + * the first set bit in @new. + * + * Apply the above specified mapping to bit position @oldbit, returning + * the new bit position. + * + * For example, let's say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions to 12 (the first set bit in @new). So if say @oldbit + * is 5, then this routine returns 13. + */ +int bitmap_bitremap(int oldbit, const unsigned long *old, + const unsigned long *new, int bits) +{ + int x = bitmap_pos_to_ord(old, oldbit, bits); + return bitmap_ord_to_pos(new, x, bits); +} +EXPORT_SYMBOL(bitmap_bitremap); + /** * bitmap_find_free_region - find a contiguous aligned mem region * @bitmap: an array of unsigned longs corresponding to the bitmap
diff --git a/lib/extable.c b/lib/extable.c index 3f677a8f0c3c..18df57c029df 100644 --- a/lib/extable.c +++ b/lib/extable.c @@ -16,9 +16,6 @@ #include <linux/sort.h> #include <asm/uaccess.h> -extern struct exception_table_entry __start___ex_table[]; -extern struct exception_table_entry __stop___ex_table[]; - #ifndef ARCH_HAS_SORT_EXTABLE /* * The exception table needs to be sorted so that the binary
diff --git a/lib/idr.c b/lib/idr.c index 6414b2fb482d..d226259c3c28 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -6,20 +6,20 @@ * Modified by George Anzinger to reuse immediately and to use * find bit instructions. Also removed _irq on spinlocks. * - * Small id to pointer translation service. + * Small id to pointer translation service. * - * It uses a radix tree like structure as a sparse array indexed + * It uses a radix tree like structure as a sparse array indexed * by the id to obtain the pointer. The bitmap makes allocating - * a new id quick. + * a new id quick. * * You call it to allocate an id (an int) and associate with that id a * pointer or whatever, we treat it as a (void *). You can pass this * id to a user for him to pass back at a later time. You then pass * that id to this code and it returns your pointer. * You can release ids at any time.
When all ids are released, most of * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we - * don't need to go to the memory "store" during an id allocate, just + * don't need to go to the memory "store" during an id allocate, just * so you don't need to be too concerned about locking and conflicts * with the slab allocator. */ @@ -77,7 +77,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) while (idp->id_free_cnt < IDR_FREE_MAX) { struct idr_layer *new; new = kmem_cache_alloc(idr_layer_cache, gfp_mask); - if(new == NULL) + if (new == NULL) return (0); free_layer(idp, new); } @@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) if (m == IDR_SIZE) { /* no space available go back to previous layer. */ l++; - id = (id | ((1 << (IDR_BITS*l))-1)) + 1; + id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; if (!(p = pa[l])) { *starting_id = id; return -2; @@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) { struct idr_layer *p, *new; int layers, v, id; - + id = starting_id; build_up: p = idp->top; @@ -225,6 +225,7 @@ build_up: int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) { int rv; + rv = idr_get_new_above_int(idp, ptr, starting_id); /* * This is a cheap hack until the IDR code can be fixed to @@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above); int idr_get_new(struct idr *idp, void *ptr, int *id) { int rv; + rv = idr_get_new_above_int(idp, ptr, 0); /* * This is a cheap hack until the IDR code can be fixed to @@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id) free_layer(idp, **paa); **paa-- = NULL; } - if ( ! *paa ) + if (!*paa) idp->layers = 0; - } else { + } else idr_remove_warning(id); - } } /** @@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id) id &= MAX_ID_MASK; sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); - if ( idp->top && idp->top->count == 1 && - (idp->layers > 1) && - idp->top->ary[0]){ // We can drop a layer + if (idp->top && idp->top->count == 1 && (idp->layers > 1) && + idp->top->ary[0]) { // We can drop a layer p = idp->top->ary[0]; idp->top->bitmap = idp->top->count = 0; @@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id) --idp->layers; } while (idp->id_free_cnt >= IDR_FREE_MAX) { - p = alloc_layer(idp); kmem_cache_free(idr_layer_cache, p); return; @@ -391,8 +390,8 @@ void *idr_find(struct idr *idp, int id) } EXPORT_SYMBOL(idr_find); -static void idr_cache_ctor(void * idr_layer, - kmem_cache_t *idr_layer_cache, unsigned long flags) +static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, + unsigned long flags) { memset(idr_layer, 0, sizeof(struct idr_layer)); } @@ -400,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer, static int init_id_cache(void) { if (!idr_layer_cache) - idr_layer_cache = kmem_cache_create("idr_layer_cache", + idr_layer_cache = kmem_cache_create("idr_layer_cache", sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); return 0; }
diff --git a/lib/kobject.c b/lib/kobject.c index 253d3004ace9..a181abed89f6 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/module.h> #include <linux/stat.h> +#include <linux/slab.h> /** * populate_dir - populate directory with attributes.
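For reference, the allocation protocol that idr_pre_get() and idr_get_new() expect from callers, including the -EAGAIN retry that the "cheap hack" comments above refer to, looks roughly like this. This is a sketch; my_idr, my_lock and new_ptr are illustrative names:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static struct idr my_idr;               /* idr_init(&my_idr) at init time */
    static DEFINE_SPINLOCK(my_lock);

    int store_ptr(void *new_ptr)
    {
            int id, ret;

     again:
            /* Refill the per-idr free-layer pool; may sleep with GFP_KERNEL,
             * so it is called outside the lock. */
            if (!idr_pre_get(&my_idr, GFP_KERNEL))
                    return -ENOMEM;

            spin_lock(&my_lock);
            ret = idr_get_new(&my_idr, new_ptr, &id);
            spin_unlock(&my_lock);

            if (ret == -EAGAIN)
                    goto again;     /* pool drained by a racing allocator */
            else if (ret)
                    return ret;

            return id;              /* later: idr_find() / idr_remove(&my_idr, id) */
    }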
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 42c08ef828c5..eddc9b3d3876 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -5,6 +5,7 @@ */ #include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/sched.h> unsigned int debug_smp_processor_id(void) {
diff --git a/lib/sort.c b/lib/sort.c index ddc4d35df289..5f3b51ffa1dc 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sort.h> +#include <linux/slab.h> static void u32_swap(void *a, void *b, int size) {
diff --git a/lib/string.c b/lib/string.c index d886ef157c12..037a48acedbb 100644 --- a/lib/string.c +++ b/lib/string.c @@ -36,11 +36,13 @@ int strnicmp(const char *s1, const char *s2, size_t len) /* Yes, Virginia, it had better be unsigned */ unsigned char c1, c2; - c1 = 0; c2 = 0; + c1 = c2 = 0; if (len) { do { - c1 = *s1; c2 = *s2; - s1++; s2++; + c1 = *s1; + c2 = *s2; + s1++; + s2++; if (!c1) break; if (!c2) @@ -55,7 +57,6 @@ int strnicmp(const char *s1, const char *s2, size_t len) } return (int)c1 - (int)c2; } - EXPORT_SYMBOL(strnicmp); #endif @@ -66,7 +67,7 @@ EXPORT_SYMBOL(strnicmp); * @src: Where to copy the string from */ #undef strcpy -char * strcpy(char * dest,const char *src) +char *strcpy(char *dest, const char *src) { char *tmp = dest; @@ -91,12 +92,13 @@ EXPORT_SYMBOL(strcpy); * count, the remainder of @dest will be padded with %NUL. * */ -char * strncpy(char * dest,const char *src,size_t count) +char *strncpy(char *dest, const char *src, size_t count) { char *tmp = dest; while (count) { - if ((*tmp = *src) != 0) src++; + if ((*tmp = *src) != 0) + src++; tmp++; count--; } @@ -122,7 +124,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) size_t ret = strlen(src); if (size) { - size_t len = (ret >= size) ? size-1 : ret; + size_t len = (ret >= size) ? size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } @@ -138,7 +140,7 @@ EXPORT_SYMBOL(strlcpy); * @src: The string to append to it */ #undef strcat -char * strcat(char * dest, const char * src) +char *strcat(char *dest, const char *src) { char *tmp = dest; @@ -146,7 +148,6 @@ char * strcat(char * dest, const char * src) dest++; while ((*dest++ = *src++) != '\0') ; - return tmp; } EXPORT_SYMBOL(strcat); @@ -162,7 +163,7 @@ EXPORT_SYMBOL(strcat); * Note that in contrast to strncpy, strncat ensures the result is * terminated.
*/ -char * strncat(char *dest, const char *src, size_t count) +char *strncat(char *dest, const char *src, size_t count) { char *tmp = dest; @@ -176,7 +177,6 @@ char * strncat(char *dest, const char *src, size_t count) } } } - return tmp; } EXPORT_SYMBOL(strncat); @@ -216,15 +216,14 @@ EXPORT_SYMBOL(strlcat); * @ct: Another string */ #undef strcmp -int strcmp(const char * cs,const char * ct) +int strcmp(const char *cs, const char *ct) { - register signed char __res; + signed char __res; while (1) { if ((__res = *cs - *ct++) != 0 || !*cs++) break; } - return __res; } EXPORT_SYMBOL(strcmp); @@ -237,16 +236,15 @@ EXPORT_SYMBOL(strcmp); * @ct: Another string * @count: The maximum number of bytes to compare */ -int strncmp(const char * cs,const char * ct,size_t count) +int strncmp(const char *cs, const char *ct, size_t count) { - register signed char __res = 0; + signed char __res = 0; while (count) { if ((__res = *cs - *ct++) != 0 || !*cs++) break; count--; } - return __res; } EXPORT_SYMBOL(strncmp); @@ -258,12 +256,12 @@ EXPORT_SYMBOL(strncmp); * @s: The string to be searched * @c: The character to search for */ -char * strchr(const char * s, int c) +char *strchr(const char *s, int c) { - for(; *s != (char) c; ++s) + for (; *s != (char)c; ++s) if (*s == '\0') return NULL; - return (char *) s; + return (char *)s; } EXPORT_SYMBOL(strchr); #endif @@ -274,7 +272,7 @@ EXPORT_SYMBOL(strchr); * @s: The string to be searched * @c: The character to search for */ -char * strrchr(const char * s, int c) +char *strrchr(const char *s, int c) { const char *p = s + strlen(s); do { @@ -296,8 +294,8 @@ EXPORT_SYMBOL(strrchr); char *strnchr(const char *s, size_t count, int c) { for (; count-- && *s != '\0'; ++s) - if (*s == (char) c) - return (char *) s; + if (*s == (char)c) + return (char *)s; return NULL; } EXPORT_SYMBOL(strnchr); @@ -308,7 +306,7 @@ EXPORT_SYMBOL(strnchr); * strlen - Find the length of a string * @s: The string to be sized */ -size_t strlen(const char * s) +size_t strlen(const char *s) { const char *sc; @@ -325,7 +323,7 @@ EXPORT_SYMBOL(strlen); * @s: The string to be sized * @count: The maximum number of bytes to search */ -size_t strnlen(const char * s, size_t count) +size_t strnlen(const char *s, size_t count) { const char *sc; @@ -358,7 +356,6 @@ size_t strspn(const char *s, const char *accept) return count; ++count; } - return count; } @@ -384,9 +381,8 @@ size_t strcspn(const char *s, const char *reject) } ++count; } - return count; -} +} EXPORT_SYMBOL(strcspn); #ifndef __HAVE_ARCH_STRPBRK @@ -395,14 +391,14 @@ EXPORT_SYMBOL(strcspn); * @cs: The string to be searched * @ct: The characters to search for */ -char * strpbrk(const char * cs,const char * ct) +char *strpbrk(const char *cs, const char *ct) { - const char *sc1,*sc2; + const char *sc1, *sc2; - for( sc1 = cs; *sc1 != '\0'; ++sc1) { - for( sc2 = ct; *sc2 != '\0'; ++sc2) { + for (sc1 = cs; *sc1 != '\0'; ++sc1) { + for (sc2 = ct; *sc2 != '\0'; ++sc2) { if (*sc1 == *sc2) - return (char *) sc1; + return (char *)sc1; } } return NULL; @@ -422,9 +418,10 @@ EXPORT_SYMBOL(strpbrk); * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. * Same semantics, slimmer shape.
;) */ -char * strsep(char **s, const char *ct) +char *strsep(char **s, const char *ct) { - char *sbegin = *s, *end; + char *sbegin = *s; + char *end; if (sbegin == NULL) return NULL; @@ -433,10 +430,8 @@ char * strsep(char **s, const char *ct) if (end) *end++ = '\0'; *s = end; - return sbegin; } - EXPORT_SYMBOL(strsep); #endif @@ -449,13 +444,12 @@ EXPORT_SYMBOL(strsep); * * Do not use memset() to access IO space, use memset_io() instead. */ -void * memset(void * s,int c,size_t count) +void *memset(void *s, int c, size_t count) { - char *xs = (char *) s; + char *xs = s; while (count--) *xs++ = c; - return s; } EXPORT_SYMBOL(memset); @@ -471,13 +465,13 @@ EXPORT_SYMBOL(memset); * You should not use this function to access IO space, use memcpy_toio() * or memcpy_fromio() instead. */ -void * memcpy(void * dest,const void *src,size_t count) +void *memcpy(void *dest, const void *src, size_t count) { - char *tmp = (char *) dest, *s = (char *) src; + char *tmp = dest; + char *s = src; while (count--) *tmp++ = *s++; - return dest; } EXPORT_SYMBOL(memcpy); @@ -492,23 +486,24 @@ EXPORT_SYMBOL(memcpy); * * Unlike memcpy(), memmove() copes with overlapping areas. */ -void * memmove(void * dest,const void *src,size_t count) +void *memmove(void *dest, const void *src, size_t count) { - char *tmp, *s; + char *tmp; + const char *s; if (dest <= src) { - tmp = (char *) dest; - s = (char *) src; + tmp = dest; + s = src; while (count--) *tmp++ = *s++; - } - else { - tmp = (char *) dest + count; - s = (char *) src + count; + } else { + tmp = dest; + tmp += count; + s = src; + s += count; while (count--) *--tmp = *--s; - } - + } return dest; } EXPORT_SYMBOL(memmove); @@ -522,12 +517,12 @@ EXPORT_SYMBOL(memmove); * @count: The size of the area. */ #undef memcmp -int memcmp(const void * cs,const void * ct,size_t count) +int memcmp(const void *cs, const void *ct, size_t count) { const unsigned char *su1, *su2; int res = 0; - for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) + for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) if ((res = *su1 - *su2) != 0) break; return res; @@ -545,17 +540,17 @@ EXPORT_SYMBOL(memcmp); * returns the address of the first occurrence of @c, or 1 byte past * the area if @c is not found */ -void * memscan(void * addr, int c, size_t size) +void *memscan(void *addr, int c, size_t size) { - unsigned char * p = (unsigned char *) addr; + unsigned char *p = addr; while (size) { if (*p == c) - return (void *) p; + return (void *)p; p++; size--; } - return (void *) p; + return (void *)p; } EXPORT_SYMBOL(memscan); #endif @@ -566,18 +561,18 @@ EXPORT_SYMBOL(memscan); * @s1: The string to be searched * @s2: The string to search for */ -char * strstr(const char * s1,const char * s2) +char *strstr(const char *s1, const char *s2) { int l1, l2; l2 = strlen(s2); if (!l2) - return (char *) s1; + return (char *)s1; l1 = strlen(s1); while (l1 >= l2) { l1--; - if (!memcmp(s1,s2,l2)) - return (char *) s1; + if (!memcmp(s1, s2, l2)) + return (char *)s1; s1++; } return NULL; @@ -600,7 +595,7 @@ void *memchr(const void *s, int c, size_t n) const unsigned char *p = s; while (n-- != 0) { if ((unsigned char)c == *p++) { - return (void *)(p-1); + return (void *)(p - 1); } } return NULL;
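As an aside, the strsep() documented in the hunk above is the intended replacement for strtok(); a typical tokenizing loop looks like this (shown as a userspace sketch, since glibc's strsep() has the same semantics as the kernel's):

    #define _DEFAULT_SOURCE         /* for strsep() in glibc */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "eth0,eth1,,eth2";
            char *cur = buf;
            char *tok;

            /* Each call carves one token out of the buffer and advances cur;
             * unlike strtok(), empty fields are returned (as "") and no
             * hidden static state is involved. */
            while ((tok = strsep(&cur, ",")) != NULL)
                    printf("token: '%s'\n", tok);
            return 0;
    }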
diff --git a/lib/swiotlb.c b/lib/swiotlb.c new file mode 100644 index 000000000000..57216f3544ca --- /dev/null +++ b/lib/swiotlb.c @@ -0,0 +1,811 @@ +/* + * Dynamic DMA mapping support. + * + * This implementation is for IA-64 and EM64T platforms that do not support + * I/O TLBs (aka DMA address translation hardware). + * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> + * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> + * Copyright (C) 2000, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * + * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. + * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid + * unnecessary i-cache flushing. + * 04/07/.. ak Better overflow handling. Assorted fixes. + * 05/09/10 linville Add support for syncing ranges, support syncing for + * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. + */ + +#include <linux/cache.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ctype.h> + +#include <asm/io.h> +#include <asm/dma.h> +#include <asm/scatterlist.h> + +#include <linux/init.h> +#include <linux/bootmem.h> + +#define OFFSET(val,align) ((unsigned long) \ + ( (val) & ( (align) - 1))) + +#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) +#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) + +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define IO_TLB_SEGSIZE 128 + +/* + * log of the size of each IO TLB slab. The number of slabs is command line + * controllable. + */ +#define IO_TLB_SHIFT 11 + +#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) + +/* + * Minimum IO TLB size to bother booting with. Systems with mainly + * 64bit capable cards will only lightly use the swiotlb. If we can't + * allocate a contiguous 1MB, we're probably in trouble anyway. + */ +#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) + +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; + +int swiotlb_force; + +/* + * Used to do a quick range check in swiotlb_unmap_single and + * swiotlb_sync_single_*, to see if the memory was in fact allocated by this + * API. + */ +static char *io_tlb_start, *io_tlb_end; + +/* + * The number of IO TLB blocks (in groups of 64) between io_tlb_start and + * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. + */ +static unsigned long io_tlb_nslabs; + +/* + * When the IOMMU overflows we return a fallback buffer. This sets the size. + */ +static unsigned long io_tlb_overflow = 32*1024; + +void *io_tlb_overflow_buffer; + +/* + * This is a free list describing the number of free entries available from + * each index + */ +static unsigned int *io_tlb_list; +static unsigned int io_tlb_index; + +/* + * We need to save away the original address corresponding to a mapped entry + * for the sync operations. + */ +static unsigned char **io_tlb_orig_addr; + +/* + * Protect the above data structures in the map and unmap calls + */ +static DEFINE_SPINLOCK(io_tlb_lock); + +static int __init +setup_io_tlb_npages(char *str) +{ + if (isdigit(*str)) { + io_tlb_nslabs = simple_strtoul(str, &str, 0); + /* avoid tail segment of size < IO_TLB_SEGSIZE */ + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + if (*str == ',') + ++str; + if (!strcmp(str, "force")) + swiotlb_force = 1; + return 1; +} +__setup("swiotlb=", setup_io_tlb_npages); +/* make io_tlb_overflow tunable too? */
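Given the parser above, the swiotlb= boot parameter takes a slab count and an optional "force" flag; each slab is 1 << IO_TLB_SHIFT = 2KB and the count is rounded up to a multiple of IO_TLB_SEGSIZE. Illustrative examples (the counts are arbitrary):

    swiotlb=32768           32768 x 2KB slabs = a 64MB bounce pool (the compiled-in default)
    swiotlb=16384,force     a 32MB pool, and bounce-buffer every mapping even for
                            devices whose DMA mask could reach the memory directly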
+ +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the software IO TLB used to implement the DMA API. + */ +void +swiotlb_init_with_default_size (size_t default_size) +{ + unsigned long i; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + /* + * Get IO TLB memory from the low pages + */ + io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * + (1 << IO_TLB_SHIFT), 0x100000000); + if (!io_tlb_start) + panic("Cannot allocate SWIOTLB buffer"); + io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end. + */ + io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); + for (i = 0; i < io_tlb_nslabs; i++) + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_index = 0; + io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); + + /* + * Get the overflow emergency buffer + */ + io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); + printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", + virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); +} + +void +swiotlb_init (void) +{ + swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ +} + +/* + * Systems with larger DMA zones (those that don't support ISA) can + * initialize the swiotlb later using the slab allocator if needed. + * This should be just like above, but with some error catching. + */ +int +swiotlb_late_init_with_default_size (size_t default_size) +{ + unsigned long i, req_nslabs = io_tlb_nslabs; + unsigned int order; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + /* + * Get IO TLB memory from the low pages + */ + order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); + io_tlb_nslabs = SLABS_PER_PAGE << order; + + while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { + io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, + order); + if (io_tlb_start) + break; + order--; + } + + if (!io_tlb_start) + goto cleanup1; + + if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { + printk(KERN_WARNING "Warning: only able to allocate %ld MB " + "for software IO TLB\n", (PAGE_SIZE << order) >> 20); + io_tlb_nslabs = SLABS_PER_PAGE << order; + } + io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); + memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end.
+ */ + io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * sizeof(int))); + if (!io_tlb_list) + goto cleanup2; + + for (i = 0; i < io_tlb_nslabs; i++) + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_index = 0; + + io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * sizeof(char *))); + if (!io_tlb_orig_addr) + goto cleanup3; + + memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); + + /* + * Get the overflow emergency buffer + */ + io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, + get_order(io_tlb_overflow)); + if (!io_tlb_overflow_buffer) + goto cleanup4; + + printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " + "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20, + virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); + + return 0; + +cleanup4: + free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * + sizeof(char *))); + io_tlb_orig_addr = NULL; +cleanup3: + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * + sizeof(int))); + io_tlb_list = NULL; + io_tlb_end = NULL; +cleanup2: + free_pages((unsigned long)io_tlb_start, order); + io_tlb_start = NULL; +cleanup1: + io_tlb_nslabs = req_nslabs; + return -ENOMEM; +} + +static inline int +address_needs_mapping(struct device *hwdev, dma_addr_t addr) +{ + dma_addr_t mask = 0xffffffff; + /* If the device has a mask, use it, otherwise default to 32 bits */ + if (hwdev && hwdev->dma_mask) + mask = *hwdev->dma_mask; + return (addr & ~mask) != 0; +} + +/* + * Allocates bounce buffer and returns its kernel virtual address. + */ +static void * +map_single(struct device *hwdev, char *buffer, size_t size, int dir) +{ + unsigned long flags; + char *dma_addr; + unsigned int nslots, stride, index, wrap; + int i; + + /* + * For mappings greater than a page, we limit the stride (and + * hence alignment) to a page size. + */ + nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + if (size > PAGE_SIZE) + stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); + else + stride = 1; + + if (!nslots) + BUG(); + + /* + * Find suitable number of IO TLB entries size that will fit this + * request and allocate a buffer from that IO TLB pool. + */ + spin_lock_irqsave(&io_tlb_lock, flags); + { + wrap = index = ALIGN(io_tlb_index, stride); + + if (index >= io_tlb_nslabs) + wrap = index = 0; + + do { + /* + * If we find a slot that indicates we have 'nslots' + * number of contiguous buffers, we allocate the + * buffers from that slot and mark the entries as '0' + * indicating unavailable. + */ + if (io_tlb_list[index] >= nslots) { + int count = 0; + + for (i = index; i < (int) (index + nslots); i++) + io_tlb_list[i] = 0; + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); + + /* + * Update the indices to avoid searching in + * the next round. + */ + io_tlb_index = ((index + nslots) < io_tlb_nslabs + ? (index + nslots) : 0); + + goto found; + } + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + } while (index != wrap); + + spin_unlock_irqrestore(&io_tlb_lock, flags); + return NULL; + } + found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + + /* + * Save away the mapping from the original address to the DMA address. + * This is needed when we sync the memory. Then we sync the buffer if + * needed.
+ */ + io_tlb_orig_addr[index] = buffer; + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + memcpy(dma_addr, buffer, size); + + return dma_addr; +} + +/* + * dma_addr is the kernel virtual address of the bounce buffer to unmap. + */ +static void +unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) +{ + unsigned long flags; + int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; + char *buffer = io_tlb_orig_addr[index]; + + /* + * First, sync the memory before unmapping the entry + */ + if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) + /* + * bounce... copy the data back into the original buffer and + * delete the bounce buffer. + */ + memcpy(buffer, dma_addr, size); + + /* + * Return the buffer to the free list by setting the corresponding + * entries to indicate the number of contiguous entries available. + * While returning the entries to the free list, we merge the entries + * with slots below and above the pool being returned. + */ + spin_lock_irqsave(&io_tlb_lock, flags); + { + count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? + io_tlb_list[index + nslots] : 0); + /* + * Step 1: return the slots to the free list, merging the + * slots with succeeding slots + */ + for (i = index + nslots - 1; i >= index; i--) + io_tlb_list[i] = ++count; + /* + * Step 2: merge the returned slots with the preceding slots, + * if available (non zero) + */ + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + } + spin_unlock_irqrestore(&io_tlb_lock, flags); +} + +static void +sync_single(struct device *hwdev, char *dma_addr, size_t size, + int dir, int target) +{ + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; + char *buffer = io_tlb_orig_addr[index]; + + switch (target) { + case SYNC_FOR_CPU: + if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) + memcpy(buffer, dma_addr, size); + else if (dir != DMA_TO_DEVICE) + BUG(); + break; + case SYNC_FOR_DEVICE: + if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + memcpy(dma_addr, buffer, size); + else if (dir != DMA_FROM_DEVICE) + BUG(); + break; + default: + BUG(); + } +} + +void * +swiotlb_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + unsigned long dev_addr; + void *ret; + int order = get_order(size); + + /* + * XXX fix me: the DMA API should pass us an explicit DMA mask + * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32 + * bit range instead of a 16MB one). + */ + flags |= GFP_DMA; + + ret = (void *)__get_free_pages(flags, order); + if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { + /* + * The allocated memory isn't reachable by the device. + * Fall back on swiotlb_map_single(). + */ + free_pages((unsigned long) ret, order); + ret = NULL; + } + if (!ret) { + /* + * We are either out of memory or the device can't DMA + * to GFP_DMA memory; fall back on + * swiotlb_map_single(), which will grab memory from + * the lowest available address range.
+ */ + dma_addr_t handle; + handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); + if (dma_mapping_error(handle)) + return NULL; + + ret = phys_to_virt(handle); + } + + memset(ret, 0, size); + dev_addr = virt_to_phys(ret); + + /* Confirm address can be DMA'd by device */ + if (address_needs_mapping(hwdev, dev_addr)) { + printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", + (unsigned long long)*hwdev->dma_mask, dev_addr); + panic("swiotlb_alloc_coherent: allocated memory is out of " + "range for device"); + } + *dma_handle = dev_addr; + return ret; +} + +void +swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + if (!(vaddr >= (void *)io_tlb_start + && vaddr < (void *)io_tlb_end)) + free_pages((unsigned long) vaddr, get_order(size)); + else + /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ + swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); +} + +static void +swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) +{ + /* + * Ran out of IOMMU space for this operation. This is very bad. + * Unfortunately the drivers cannot handle this operation properly + * unless they check for dma_mapping_error (most don't). + * When the mapping is small enough return a static buffer to limit + * the damage, or panic when the transfer is too big. + */ + printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " + "device %s\n", size, dev ? dev->bus_id : "?"); + + if (size > io_tlb_overflow && do_panic) { + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + panic("DMA: Memory would be corrupted\n"); + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + panic("DMA: Random memory would be DMAed\n"); + } +} + +/* + * Map a single buffer of the indicated size for DMA in streaming mode. The + * physical address to use is returned. + * + * Once the device is given the dma address, the device owns this memory until + * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. + */ +dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +{ + unsigned long dev_addr = virt_to_phys(ptr); + void *map; + + if (dir == DMA_NONE) + BUG(); + /* + * If the pointer passed in happens to be in the device's DMA window, + * we can safely return the device addr and not worry about bounce + * buffering it. + */ + if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) + return dev_addr; + + /* + * Oh well, have to allocate and map a bounce buffer. + */ + map = map_single(hwdev, ptr, size, dir); + if (!map) { + swiotlb_full(hwdev, size, dir, 1); + map = io_tlb_overflow_buffer; + } + + dev_addr = virt_to_phys(map); + + /* + * Ensure that the address returned is DMA'ble + */ + if (address_needs_mapping(hwdev, dev_addr)) + panic("map_single: bounce buffer is not DMA'ble"); + + return dev_addr; +} + +/* + * Since DMA is i-cache coherent, any (complete) pages that were written via + * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to + * flush them when they get mapped into an executable vm-area. + */ +static void +mark_clean(void *addr, size_t size) +{ + unsigned long pg_addr, end; + + pg_addr = PAGE_ALIGN((unsigned long) addr); + end = (unsigned long) addr + size; + while (pg_addr + PAGE_SIZE <= end) { + struct page *page = virt_to_page(pg_addr); + set_bit(PG_arch_1, &page->flags); + pg_addr += PAGE_SIZE; + } +} + +/* + * Unmap a single streaming mode DMA translation.
The dma_addr and size must + * match what was provided in a previous swiotlb_map_single call. All + * other usages are undefined. + * + * After this call, reads by the cpu to the buffer are guaranteed to see + * whatever the device wrote there. + */ +void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, + int dir) +{ + char *dma_addr = phys_to_virt(dev_addr); + + if (dir == DMA_NONE) + BUG(); + if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) + unmap_single(hwdev, dma_addr, size, dir); + else if (dir == DMA_FROM_DEVICE) + mark_clean(dma_addr, size); +} + +/* + * Make physical memory consistent for a single streaming mode DMA translation + * after a transfer. + * + * If you perform a swiotlb_map_single() but wish to interrogate the buffer + * using the cpu, yet do not wish to tear down the dma mapping, you must + * call this function before doing so. At the next point you give the dma + * address back to the card, you must first perform a + * swiotlb_dma_sync_for_device, and then the device again owns the buffer. + */ +static inline void +swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, int target) +{ + char *dma_addr = phys_to_virt(dev_addr); + + if (dir == DMA_NONE) + BUG(); + if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) + sync_single(hwdev, dma_addr, size, dir, target); + else if (dir == DMA_FROM_DEVICE) + mark_clean(dma_addr, size); +} + +void +swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir) +{ + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); +} + +void +swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir) +{ + swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); +} + +/* + * Same as above, but for a sub-range of the mapping. + */ +static inline void +swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, + int dir, int target) +{ + char *dma_addr = phys_to_virt(dev_addr) + offset; + + if (dir == DMA_NONE) + BUG(); + if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) + sync_single(hwdev, dma_addr, size, dir, target); + else if (dir == DMA_FROM_DEVICE) + mark_clean(dma_addr, size); +} + +void +swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, int dir) +{ + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_CPU); +} + +void +swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, + unsigned long offset, size_t size, int dir) +{ + swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, + SYNC_FOR_DEVICE); +} + +/* + * Map a set of buffers described by scatterlist in streaming mode for DMA. + * This is the scatter-gather version of the above swiotlb_map_single + * interface. Here the scatter gather list elements are each tagged with the + * appropriate dma address and length. They are obtained via + * sg_dma_{address,length}(SG). + * + * NOTE: An implementation may be able to use a smaller number of + * DMA address/length pairs than there are SG table elements. + * (for example via virtual mapping capabilities) + * The routine returns the number of addr/length pairs actually + * used, at most nents. + * + * Device ownership issues as mentioned above for swiotlb_map_single are the + * same here.
+ */ +int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, + int dir) +{ + void *addr; + unsigned long dev_addr; + int i; + + if (dir == DMA_NONE) + BUG(); + + for (i = 0; i < nelems; i++, sg++) { + addr = SG_ENT_VIRT_ADDRESS(sg); + dev_addr = virt_to_phys(addr); + if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { + sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir)); + if (!sg->dma_address) { + /* Don't panic here, we expect map_sg users + to do proper error handling. */ + swiotlb_full(hwdev, sg->length, dir, 0); + swiotlb_unmap_sg(hwdev, sg - i, i, dir); + sg[0].dma_length = 0; + return 0; + } + } else + sg->dma_address = dev_addr; + sg->dma_length = sg->length; + } + return nelems; +} + +/* + * Unmap a set of streaming mode DMA translations. Again, cpu read rules + * concerning calls here are the same as for swiotlb_unmap_single() above. + */ +void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems, + int dir) +{ + int i; + + if (dir == DMA_NONE) + BUG(); + + for (i = 0; i < nelems; i++, sg++) + if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) + unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); + else if (dir == DMA_FROM_DEVICE) + mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); +} + +/* + * Make physical memory consistent for a set of streaming mode DMA translations + * after a transfer. + * + * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules + * and usage. + */ +static inline void +swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, + int nelems, int dir, int target) +{ + int i; + + if (dir == DMA_NONE) + BUG(); + + for (i = 0; i < nelems; i++, sg++) + if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) + sync_single(hwdev, (void *) sg->dma_address, + sg->dma_length, dir, target); +} + +void +swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, + int nelems, int dir) +{ + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); +} + +void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, int dir) +{ + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); +} + +int +swiotlb_dma_mapping_error(dma_addr_t dma_addr) +{ + return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); +} + +/* + * Return whether the given device DMA address mask can be supported + * properly. For example, if your device can only drive the low 24-bits + * during bus mastering, then you would pass 0x00ffffff as the mask to + * this function.
+ */ +int +swiotlb_dma_supported (struct device *hwdev, u64 mask) +{ + return (virt_to_phys (io_tlb_end) - 1) <= mask; +} + +EXPORT_SYMBOL(swiotlb_init); +EXPORT_SYMBOL(swiotlb_map_single); +EXPORT_SYMBOL(swiotlb_unmap_single); +EXPORT_SYMBOL(swiotlb_map_sg); +EXPORT_SYMBOL(swiotlb_unmap_sg); +EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); +EXPORT_SYMBOL(swiotlb_sync_single_for_device); +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); +EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); +EXPORT_SYMBOL(swiotlb_sync_sg_for_device); +EXPORT_SYMBOL(swiotlb_dma_mapping_error); +EXPORT_SYMBOL(swiotlb_alloc_coherent); +EXPORT_SYMBOL(swiotlb_free_coherent); +EXPORT_SYMBOL(swiotlb_dma_supported);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e4e9031dd9c3..b07db5ca3f66 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -23,6 +23,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> +#include <asm/page.h> /* for PAGE_SIZE */ #include <asm/div64.h> /**
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 3d94cb90c1d3..31b9e9054bf7 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c @@ -3,7 +3,6 @@ * For conditions of distribution and use, see copyright notice in zlib.h */ -#include <linux/module.h> #include <linux/zutil.h> #include "infblock.h" #include "infutil.h"
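Stepping back, the bulk of this commit is the new lib/swiotlb.c, which services the generic streaming-DMA calls on platforms without an I/O TLB. From a driver's side, the pattern it has to support looks like this. This is a sketch against the generic DMA API of this era; dev, buf and len are illustrative, and on a swiotlb system each call lands in the corresponding swiotlb_* function above:

    dma_addr_t handle;

    /* Map: returns the buffer's bus address if the device can reach it,
     * otherwise the address of a bounce-buffer slot (map_single). */
    handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(handle))
            return -EIO;    /* the emergency overflow buffer came back */

    /* ... the device DMAs into the buffer ... */

    /* Make the device's writes visible to the CPU; for a bounced mapping
     * this is the memcpy out of the bounce slot (SYNC_FOR_CPU). */
    dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

    /* Unmap: copies back if still needed and frees the bounce slots. */
    dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);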