author | Len Brown <len.brown@intel.com> | 2005-12-06 17:31:30 -0500
committer | Len Brown <len.brown@intel.com> | 2005-12-06 17:31:30 -0500
commit | 3d5271f9883cba7b54762bc4fe027d4172f06db7 (patch)
tree | ab8a881a14478598a0c8bda0d26c62cdccfffd6d /lib
parent | 378b2556f4e09fa6f87ff0cb5c4395ff28257d02 (diff)
parent | 9115a6c787596e687df03010d97fccc5e0762506 (diff)
Pull release into acpica branch
Diffstat (limited to 'lib')
-rw-r--r-- | lib/.gitignore | 6
-rw-r--r-- | lib/Kconfig.debug | 27
-rw-r--r-- | lib/Makefile | 2
-rw-r--r-- | lib/bitmap.c | 166
-rw-r--r-- | lib/extable.c | 3
-rw-r--r-- | lib/genalloc.c | 14
-rw-r--r-- | lib/idr.c | 50
-rw-r--r-- | lib/kobject.c | 3
-rw-r--r-- | lib/kobject_uevent.c | 6
-rw-r--r-- | lib/radix-tree.c | 53
-rw-r--r-- | lib/reed_solomon/Makefile | 2
-rw-r--r-- | lib/reed_solomon/decode_rs.c | 36
-rw-r--r-- | lib/reed_solomon/encode_rs.c | 14
-rw-r--r-- | lib/reed_solomon/reed_solomon.c | 64
-rw-r--r-- | lib/smp_processor_id.c | 1
-rw-r--r-- | lib/sort.c | 1
-rw-r--r-- | lib/string.c | 125
-rw-r--r-- | lib/swiotlb.c | 811
-rw-r--r-- | lib/textsearch.c | 2
-rw-r--r-- | lib/ts_bm.c | 2
-rw-r--r-- | lib/ts_fsm.c | 2
-rw-r--r-- | lib/ts_kmp.c | 2
-rw-r--r-- | lib/vsprintf.c | 1
-rw-r--r-- | lib/zlib_inflate/inflate.c | 1
24 files changed, 1215 insertions, 179 deletions
diff --git a/lib/.gitignore b/lib/.gitignore new file mode 100644 index 000000000000..3bef1ea94c99 --- /dev/null +++ b/lib/.gitignore @@ -0,0 +1,6 @@ +# +# Generated files +# +gen_crc32table +crc32table.h + diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 016e89a44ac8..156822e3cc79 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -128,7 +128,7 @@ config DEBUG_HIGHMEM config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED depends on BUG - depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV + depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV default !EMBEDDED help Say Y here to make BUG() panics output the file name and line number @@ -168,13 +168,34 @@ config DEBUG_FS If unsure, say N. +config DEBUG_VM + bool "Debug VM" + depends on DEBUG_KERNEL + help + Enable this to debug the virtual-memory system. + + If unsure, say N. + config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML) default y if DEBUG_INFO && UML help If you say Y here the resulting kernel image will be slightly larger - and slower, but it might give very useful debugging information - on some architectures or you use external debuggers. + and slower, but it might give very useful debugging information on + some architectures or if you use external debuggers. If you don't debug the kernel, you can say N. +config RCU_TORTURE_TEST + tristate "torture tests for RCU" + depends on DEBUG_KERNEL + default n + help + This option provides a kernel module that runs torture tests + on the RCU infrastructure. The kernel module may be built + after the fact on the running kernel to be tested, if desired. + + Say Y here if you want RCU torture tests to start automatically + at boot time (you probably don't). + Say M if you want the RCU torture tests to build as a module. + Say N if you are unsure. diff --git a/lib/Makefile b/lib/Makefile index 44a46750690a..8535f4d7d1c3 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -44,6 +44,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o +obj-$(CONFIG_SWIOTLB) += swiotlb.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/bitmap.c b/lib/bitmap.c index fb9371fdd44a..23d3b1147fe9 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -511,6 +511,172 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) } EXPORT_SYMBOL(bitmap_parselist); +/* + * bitmap_pos_to_ord(buf, pos, bits) + * @buf: pointer to a bitmap + * @pos: a bit position in @buf (0 <= @pos < @bits) + * @bits: number of valid bit positions in @buf + * + * Map the bit at position @pos in @buf (of length @bits) to the + * ordinal of which set bit it is. If it is not set or if @pos + * is not a valid bit position, map to zero (0). + * + * If for example, just bits 4 through 7 are set in @buf, then @pos + * values 4 through 7 will get mapped to 0 through 3, respectively, + * and other @pos values will get mapped to 0. When @pos value 7 + * gets mapped to (returns) @ord value 3 in this example, that means + * that bit 7 is the 3rd (starting with 0th) set bit in @buf. + * + * The bit positions 0 through @bits are valid positions in @buf. 
+ */ +static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) +{ + int ord = 0; + + if (pos >= 0 && pos < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < pos; + i = find_next_bit(buf, bits, i + 1)) + ord++; + if (i > pos) + ord = 0; + } + return ord; +} + +/** + * bitmap_ord_to_pos(buf, ord, bits) + * @buf: pointer to bitmap + * @ord: ordinal bit position (n-th set bit, n >= 0) + * @bits: number of valid bit positions in @buf + * + * Map the ordinal offset of bit @ord in @buf to its position in @buf. + * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0). + * + * If for example, just bits 4 through 7 are set in @buf, then @ord + * values 0 through 3 will get mapped to 4 through 7, respectively, + * and all other @ord valuds will get mapped to 0. When @ord value 3 + * gets mapped to (returns) @pos value 7 in this example, that means + * that the 3rd set bit (starting with 0th) is at position 7 in @buf. + * + * The bit positions 0 through @bits are valid positions in @buf. + */ +static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) +{ + int pos = 0; + + if (ord >= 0 && ord < bits) { + int i; + + for (i = find_first_bit(buf, bits); + i < bits && ord > 0; + i = find_next_bit(buf, bits, i + 1)) + ord--; + if (i < bits && ord == 0) + pos = i; + } + + return pos; +} + +/** + * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap + * @src: subset to be remapped + * @dst: remapped result + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * If either of the @old and @new bitmaps are empty, or if@src and @dst + * point to the same location, then this routine does nothing. + * + * The positions of unset bits in @old are mapped to the position of + * the first set bit in @new. + * + * Apply the above specified mapping to @src, placing the result in + * @dst, clearing any bits previously set in @dst. + * + * The resulting value of @dst will have either the same weight as + * @src, or less weight in the general case that the mapping wasn't + * injective due to the weight of @new being less than that of @old. + * The resulting value of @dst will never have greater weight than + * that of @src, except perhaps in the case that one of the above + * conditions was not met and this routine just returned. + * + * For example, lets say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions to 12 (the first set bit in @new. So if say @src + * comes into this routine with bits 1, 5 and 7 set, then @dst should + * leave with bits 12, 13 and 15 set. 
+ */ +void bitmap_remap(unsigned long *dst, const unsigned long *src, + const unsigned long *old, const unsigned long *new, + int bits) +{ + int s; + + if (bitmap_weight(old, bits) == 0) + return; + if (bitmap_weight(new, bits) == 0) + return; + if (dst == src) /* following doesn't handle inplace remaps */ + return; + + bitmap_zero(dst, bits); + for (s = find_first_bit(src, bits); + s < bits; + s = find_next_bit(src, bits, s + 1)) { + int x = bitmap_pos_to_ord(old, s, bits); + int y = bitmap_ord_to_pos(new, x, bits); + set_bit(y, dst); + } +} +EXPORT_SYMBOL(bitmap_remap); + +/** + * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit + * @oldbit - bit position to be mapped + * @old: defines domain of map + * @new: defines range of map + * @bits: number of bits in each of these bitmaps + * + * Let @old and @new define a mapping of bit positions, such that + * whatever position is held by the n-th set bit in @old is mapped + * to the n-th set bit in @new. In the more general case, allowing + * for the possibility that the weight 'w' of @new is less than the + * weight of @old, map the position of the n-th set bit in @old to + * the position of the m-th set bit in @new, where m == n % w. + * + * The positions of unset bits in @old are mapped to the position of + * the first set bit in @new. + * + * Apply the above specified mapping to bit position @oldbit, returning + * the new bit position. + * + * For example, lets say that @old has bits 4 through 7 set, and + * @new has bits 12 through 15 set. This defines the mapping of bit + * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other + * bit positions to 12 (the first set bit in @new. So if say @oldbit + * is 5, then this routine returns 13. + */ +int bitmap_bitremap(int oldbit, const unsigned long *old, + const unsigned long *new, int bits) +{ + int x = bitmap_pos_to_ord(old, oldbit, bits); + return bitmap_ord_to_pos(new, x, bits); +} +EXPORT_SYMBOL(bitmap_bitremap); + /** * bitmap_find_free_region - find a contiguous aligned mem region * @bitmap: an array of unsigned longs corresponding to the bitmap diff --git a/lib/extable.c b/lib/extable.c index 3f677a8f0c3c..18df57c029df 100644 --- a/lib/extable.c +++ b/lib/extable.c @@ -16,9 +16,6 @@ #include <linux/sort.h> #include <asm/uaccess.h> -extern struct exception_table_entry __start___ex_table[]; -extern struct exception_table_entry __stop___ex_table[]; - #ifndef ARCH_HAS_SORT_EXTABLE /* * The exception table needs to be sorted so that the binary diff --git a/lib/genalloc.c b/lib/genalloc.c index d6d30d2e7166..9ce0a6a3b85a 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -95,12 +95,10 @@ unsigned long gen_pool_alloc(struct gen_pool *poolp, int size) if (size > max_chunk_size) return 0; - i = 0; - size = max(size, 1 << ALLOC_MIN_SHIFT); - s = roundup_pow_of_two(size); - - j = i; + i = fls(size - 1); + s = 1 << i; + j = i -= ALLOC_MIN_SHIFT; spin_lock_irqsave(&poolp->lock, flags); while (!h[j].next) { @@ -153,10 +151,10 @@ void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size) if (size > max_chunk_size) return; - i = 0; - size = max(size, 1 << ALLOC_MIN_SHIFT); - s = roundup_pow_of_two(size); + i = fls(size - 1); + s = 1 << i; + i -= ALLOC_MIN_SHIFT; a = ptr; diff --git a/lib/idr.c b/lib/idr.c index 6415d053e2bf..d226259c3c28 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -6,20 +6,20 @@ * Modified by George Anzinger to reuse immediately and to use * find bit instructions. Also removed _irq on spinlocks. 
* - * Small id to pointer translation service. + * Small id to pointer translation service. * - * It uses a radix tree like structure as a sparse array indexed + * It uses a radix tree like structure as a sparse array indexed * by the id to obtain the pointer. The bitmap makes allocating - * a new id quick. + * a new id quick. * * You call it to allocate an id (an int) an associate with that id a * pointer or what ever, we treat it as a (void *). You can pass this * id to a user for him to pass back at a later time. You then pass * that id to this code and it returns your pointer. - * You can release ids at any time. When all ids are released, most of + * You can release ids at any time. When all ids are released, most of * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we - * don't need to go to the memory "store" during an id allocate, just + * don't need to go to the memory "store" during an id allocate, just * so you don't need to be too concerned about locking and conflicts * with the slab allocator. */ @@ -72,12 +72,12 @@ static void free_layer(struct idr *idp, struct idr_layer *p) * If the system is REALLY out of memory this function returns 0, * otherwise 1. */ -int idr_pre_get(struct idr *idp, unsigned gfp_mask) +int idr_pre_get(struct idr *idp, gfp_t gfp_mask) { while (idp->id_free_cnt < IDR_FREE_MAX) { struct idr_layer *new; new = kmem_cache_alloc(idr_layer_cache, gfp_mask); - if(new == NULL) + if (new == NULL) return (0); free_layer(idp, new); } @@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) if (m == IDR_SIZE) { /* no space available go back to previous layer. */ l++; - id = (id | ((1 << (IDR_BITS*l))-1)) + 1; + id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; if (!(p = pa[l])) { *starting_id = id; return -2; @@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) { struct idr_layer *p, *new; int layers, v, id; - + id = starting_id; build_up: p = idp->top; @@ -225,6 +225,7 @@ build_up: int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) { int rv; + rv = idr_get_new_above_int(idp, ptr, starting_id); /* * This is a cheap hack until the IDR code can be fixed to @@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above); int idr_get_new(struct idr *idp, void *ptr, int *id) { int rv; + rv = idr_get_new_above_int(idp, ptr, 0); /* * This is a cheap hack until the IDR code can be fixed to @@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id) free_layer(idp, **paa); **paa-- = NULL; } - if ( ! 
*paa ) + if (!*paa) idp->layers = 0; - } else { + } else idr_remove_warning(id); - } } /** @@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id) id &= MAX_ID_MASK; sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); - if ( idp->top && idp->top->count == 1 && - (idp->layers > 1) && - idp->top->ary[0]){ // We can drop a layer + if (idp->top && idp->top->count == 1 && (idp->layers > 1) && + idp->top->ary[0]) { // We can drop a layer p = idp->top->ary[0]; idp->top->bitmap = idp->top->count = 0; @@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id) --idp->layers; } while (idp->id_free_cnt >= IDR_FREE_MAX) { - p = alloc_layer(idp); kmem_cache_free(idr_layer_cache, p); return; @@ -346,6 +345,19 @@ void idr_remove(struct idr *idp, int id) EXPORT_SYMBOL(idr_remove); /** + * idr_destroy - release all cached layers within an idr tree + * idp: idr handle + */ +void idr_destroy(struct idr *idp) +{ + while (idp->id_free_cnt) { + struct idr_layer *p = alloc_layer(idp); + kmem_cache_free(idr_layer_cache, p); + } +} +EXPORT_SYMBOL(idr_destroy); + +/** * idr_find - return pointer for given id * @idp: idr handle * @id: lookup key @@ -378,8 +390,8 @@ void *idr_find(struct idr *idp, int id) } EXPORT_SYMBOL(idr_find); -static void idr_cache_ctor(void * idr_layer, - kmem_cache_t *idr_layer_cache, unsigned long flags) +static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, + unsigned long flags) { memset(idr_layer, 0, sizeof(struct idr_layer)); } @@ -387,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer, static int init_id_cache(void) { if (!idr_layer_cache) - idr_layer_cache = kmem_cache_create("idr_layer_cache", + idr_layer_cache = kmem_cache_create("idr_layer_cache", sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); return 0; } diff --git a/lib/kobject.c b/lib/kobject.c index dd0917dd9fa9..a181abed89f6 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/module.h> #include <linux/stat.h> +#include <linux/slab.h> /** * populate_dir - populate directory with attributes. 
@@ -100,7 +101,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length) * @kobj: kobject in question, with which to build the path * @gfp_mask: the allocation type used to allocate the path */ -char *kobject_get_path(struct kobject *kobj, int gfp_mask) +char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) { char *path; int len; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 04ca4429ddfa..3ab375411e38 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -54,7 +54,7 @@ static char *action_to_string(enum kobject_action action) static struct sock *uevent_sock; /** - * send_uevent - notify userspace by sending event trough netlink socket + * send_uevent - notify userspace by sending event through netlink socket * * @signal: signal name * @obj: object path (kobject) @@ -62,7 +62,7 @@ static struct sock *uevent_sock; * @gfp_mask: */ static int send_uevent(const char *signal, const char *obj, - char **envp, int gfp_mask) + char **envp, gfp_t gfp_mask) { struct sk_buff *skb; char *pos; @@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj, } static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action, - struct attribute *attr, int gfp_mask) + struct attribute *attr, gfp_t gfp_mask) { char *path; char *attrpath; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 6a8bc6e06431..88511c3805ad 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node) * success, return zero, with preemption disabled. On error, return -ENOMEM * with preemption not disabled. */ -int radix_tree_preload(unsigned int __nocast gfp_mask) +int radix_tree_preload(gfp_t gfp_mask) { struct radix_tree_preload *rtp; struct radix_tree_node *node; @@ -281,35 +281,60 @@ int radix_tree_insert(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_insert); -/** - * radix_tree_lookup - perform lookup operation on a radix tree - * @root: radix tree root - * @index: index key - * - * Lookup the item at the position @index in the radix tree @root. - */ -void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +static inline void **__lookup_slot(struct radix_tree_root *root, + unsigned long index) { unsigned int height, shift; - struct radix_tree_node *slot; + struct radix_tree_node **slot; height = root->height; if (index > radix_tree_maxindex(height)) return NULL; shift = (height-1) * RADIX_TREE_MAP_SHIFT; - slot = root->rnode; + slot = &root->rnode; while (height > 0) { - if (slot == NULL) + if (*slot == NULL) return NULL; - slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK]; + slot = (struct radix_tree_node **) + ((*slot)->slots + + ((index >> shift) & RADIX_TREE_MAP_MASK)); shift -= RADIX_TREE_MAP_SHIFT; height--; } - return slot; + return (void **)slot; +} + +/** + * radix_tree_lookup_slot - lookup a slot in a radix tree + * @root: radix tree root + * @index: index key + * + * Lookup the slot corresponding to the position @index in the radix tree + * @root. This is useful for update-if-exists operations. + */ +void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +{ + return __lookup_slot(root, index); +} +EXPORT_SYMBOL(radix_tree_lookup_slot); + +/** + * radix_tree_lookup - perform lookup operation on a radix tree + * @root: radix tree root + * @index: index key + * + * Lookup the item at the position @index in the radix tree @root. 
+ */ +void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +{ + void **slot; + + slot = __lookup_slot(root, index); + return slot != NULL ? *slot : NULL; } EXPORT_SYMBOL(radix_tree_lookup); diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile index 747a2de29346..c3d7136827ed 100644 --- a/lib/reed_solomon/Makefile +++ b/lib/reed_solomon/Makefile @@ -1,5 +1,5 @@ # -# This is a modified version of reed solomon lib, +# This is a modified version of reed solomon lib, # obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index d401decd6289..a58df56f09b6 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c @@ -1,22 +1,22 @@ -/* +/* * lib/reed_solomon/decode_rs.c * * Overview: * Generic Reed Solomon encoder / decoder library - * + * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * - * $Id: decode_rs.c,v 1.6 2004/10/22 15:41:47 gleixner Exp $ + * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ * */ -/* Generic data width independent code which is included by the +/* Generic data width independent code which is included by the * wrappers. */ -{ +{ int deg_lambda, el, deg_omega; int i, j, r, k, pad; int nn = rs->nn; @@ -41,9 +41,9 @@ pad = nn - nroots - len; if (pad < 0 || pad >= nn) return -ERANGE; - + /* Does the caller provide the syndrome ? */ - if (s != NULL) + if (s != NULL) goto decode; /* form the syndromes; i.e., evaluate data(x) at roots of @@ -54,11 +54,11 @@ for (j = 1; j < len; j++) { for (i = 0; i < nroots; i++) { if (syn[i] == 0) { - syn[i] = (((uint16_t) data[j]) ^ + syn[i] = (((uint16_t) data[j]) ^ invmsk) & msk; } else { syn[i] = ((((uint16_t) data[j]) ^ - invmsk) & msk) ^ + invmsk) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr + i) * prim)]; } @@ -70,7 +70,7 @@ if (syn[i] == 0) { syn[i] = ((uint16_t) par[j]) & msk; } else { - syn[i] = (((uint16_t) par[j]) & msk) ^ + syn[i] = (((uint16_t) par[j]) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr+i)*prim)]; } @@ -99,14 +99,14 @@ if (no_eras > 0) { /* Init lambda to be the erasure locator polynomial */ - lambda[1] = alpha_to[rs_modnn(rs, + lambda[1] = alpha_to[rs_modnn(rs, prim * (nn - 1 - eras_pos[0]))]; for (i = 1; i < no_eras; i++) { u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); for (j = i + 1; j > 0; j--) { tmp = index_of[lambda[j - 1]]; if (tmp != nn) { - lambda[j] ^= + lambda[j] ^= alpha_to[rs_modnn(rs, u + tmp)]; } } @@ -127,8 +127,8 @@ discr_r = 0; for (i = 0; i < r; i++) { if ((lambda[i] != 0) && (s[r - i - 1] != nn)) { - discr_r ^= - alpha_to[rs_modnn(rs, + discr_r ^= + alpha_to[rs_modnn(rs, index_of[lambda[i]] + s[r - i - 1])]; } @@ -143,7 +143,7 @@ t[0] = lambda[0]; for (i = 0; i < nroots; i++) { if (b[i] != nn) { - t[i + 1] = lambda[i + 1] ^ + t[i + 1] = lambda[i + 1] ^ alpha_to[rs_modnn(rs, discr_r + b[i])]; } else @@ -229,7 +229,7 @@ num1 = 0; for (i = deg_omega; i >= 0; i--) { if (omega[i] != nn) - num1 ^= alpha_to[rs_modnn(rs, omega[i] + + num1 ^= alpha_to[rs_modnn(rs, omega[i] + i * root[j])]; } num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; @@ -239,13 +239,13 @@ * lambda_pr of lambda[i] */ for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) { if (lambda[i + 1] != nn) { - den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + + den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + i * root[j])]; } } /* Apply error to data */ if (num1 != 0 && 
loc[j] >= pad) { - uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + + uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + index_of[num2] + nn - index_of[den])]; /* Store the error correction pattern, if a diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c index 237bf65ae886..0b5b1a6728ec 100644 --- a/lib/reed_solomon/encode_rs.c +++ b/lib/reed_solomon/encode_rs.c @@ -1,19 +1,19 @@ -/* +/* * lib/reed_solomon/encode_rs.c * * Overview: * Generic Reed Solomon encoder / decoder library - * + * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * - * $Id: encode_rs.c,v 1.4 2004/10/22 15:41:47 gleixner Exp $ + * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $ * */ -/* Generic data width independent code which is included by the +/* Generic data width independent code which is included by the * wrappers. * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par) */ @@ -35,16 +35,16 @@ for (i = 0; i < len; i++) { fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]]; /* feedback term is non-zero */ - if (fb != nn) { + if (fb != nn) { for (j = 1; j < nroots; j++) { - par[j] ^= alpha_to[rs_modnn(rs, fb + + par[j] ^= alpha_to[rs_modnn(rs, fb + genpoly[nroots - j])]; } } /* Shift */ memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1)); if (fb != nn) { - par[nroots - 1] = alpha_to[rs_modnn(rs, + par[nroots - 1] = alpha_to[rs_modnn(rs, fb + genpoly[0])]; } else { par[nroots - 1] = 0; diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index 6604e3b1940c..f5fef948a415 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -1,22 +1,22 @@ -/* +/* * lib/reed_solomon/rslib.c * * Overview: * Generic Reed Solomon encoder / decoder library - * + * * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) * * Reed Solomon code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q * - * $Id: rslib.c,v 1.5 2004/10/22 15:41:47 gleixner Exp $ + * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Description: - * + * * The generic Reed Solomon library provides runtime configurable * encoding / decoding of RS codes. * Each user must call init_rs to get a pointer to a rs_control @@ -25,11 +25,11 @@ * If a structure is generated then the polynomial arrays for * fast encoding / decoding are built. This can take some time so * make sure not to call this function from a time critical path. - * Usually a module / driver should initialize the necessary + * Usually a module / driver should initialize the necessary * rs_control structure on module / driver init and release it * on exit. - * The encoding puts the calculated syndrome into a given syndrome - * buffer. + * The encoding puts the calculated syndrome into a given syndrome + * buffer. * The decoding is a two step process. The first step calculates * the syndrome over the received (data + syndrome) and calls the * second stage, which does the decoding / error correction itself. 
@@ -51,7 +51,7 @@ static LIST_HEAD (rslist); /* Protection for the list */ static DECLARE_MUTEX(rslistlock); -/** +/** * rs_init - Initialize a Reed-Solomon codec * * @symsize: symbol size, bits (1-8) @@ -63,7 +63,7 @@ static DECLARE_MUTEX(rslistlock); * Allocate a control structure and the polynom arrays for faster * en/decoding. Fill the arrays according to the given parameters */ -static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, +static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, int prim, int nroots) { struct rs_control *rs; @@ -124,15 +124,15 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, /* Multiply rs->genpoly[] by @**(root + x) */ for (j = i; j > 0; j--) { if (rs->genpoly[j] != 0) { - rs->genpoly[j] = rs->genpoly[j -1] ^ - rs->alpha_to[rs_modnn(rs, + rs->genpoly[j] = rs->genpoly[j -1] ^ + rs->alpha_to[rs_modnn(rs, rs->index_of[rs->genpoly[j]] + root)]; } else rs->genpoly[j] = rs->genpoly[j - 1]; } /* rs->genpoly[0] can never be zero */ - rs->genpoly[0] = - rs->alpha_to[rs_modnn(rs, + rs->genpoly[0] = + rs->alpha_to[rs_modnn(rs, rs->index_of[rs->genpoly[0]] + root)]; } /* convert rs->genpoly[] to index form for quicker encoding */ @@ -153,7 +153,7 @@ errrs: } -/** +/** * free_rs - Free the rs control structure, if its not longer used * * @rs: the control structure which is not longer used by the @@ -173,19 +173,19 @@ void free_rs(struct rs_control *rs) up(&rslistlock); } -/** +/** * init_rs - Find a matching or allocate a new rs control structure * * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial * must be primitive; - * @fcr: the first consecutive root of the rs code generator polynomial + * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) */ -struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, +struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, int nroots) { struct list_head *tmp; @@ -198,9 +198,9 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, return NULL; if (prim <= 0 || prim >= (1<<symsize)) return NULL; - if (nroots < 0 || nroots >= (1<<symsize) || nroots > 8) + if (nroots < 0 || nroots >= (1<<symsize)) return NULL; - + down(&rslistlock); /* Walk through the list and look for a matching entry */ @@ -211,9 +211,9 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, if (gfpoly != rs->gfpoly) continue; if (fcr != rs->fcr) - continue; + continue; if (prim != rs->prim) - continue; + continue; if (nroots != rs->nroots) continue; /* We have a matching one already */ @@ -227,18 +227,18 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, rs->users = 1; list_add(&rs->list, &rslist); } -out: +out: up(&rslistlock); return rs; } #ifdef CONFIG_REED_SOLOMON_ENC8 -/** +/** * encode_rs8 - Calculate the parity for data values (8bit data width) * * @rs: the rs control structure * @data: data field of a given type - * @len: data length + * @len: data length * @par: parity data, must be initialized by caller (usually all 0) * @invmsk: invert data mask (will be xored on data) * @@ -246,7 +246,7 @@ out: * symbol size > 8. The calling code must take care of encoding of the * syndrome result for storage itself. 
*/ -int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, +int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, uint16_t invmsk) { #include "encode_rs.c" @@ -255,7 +255,7 @@ EXPORT_SYMBOL_GPL(encode_rs8); #endif #ifdef CONFIG_REED_SOLOMON_DEC8 -/** +/** * decode_rs8 - Decode codeword (8bit data width) * * @rs: the rs control structure @@ -273,7 +273,7 @@ EXPORT_SYMBOL_GPL(encode_rs8); * syndrome result and the received parity before calling this code. */ int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, - uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, + uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, uint16_t *corr) { #include "decode_rs.c" @@ -287,13 +287,13 @@ EXPORT_SYMBOL_GPL(decode_rs8); * * @rs: the rs control structure * @data: data field of a given type - * @len: data length + * @len: data length * @par: parity data, must be initialized by caller (usually all 0) * @invmsk: invert data mask (will be xored on data, not on parity!) * * Each field in the data array contains up to symbol size bits of valid data. */ -int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, +int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, uint16_t invmsk) { #include "encode_rs.c" @@ -302,7 +302,7 @@ EXPORT_SYMBOL_GPL(encode_rs16); #endif #ifdef CONFIG_REED_SOLOMON_DEC16 -/** +/** * decode_rs16 - Decode codeword (16bit data width) * * @rs: the rs control structure @@ -312,13 +312,13 @@ EXPORT_SYMBOL_GPL(encode_rs16); * @s: syndrome data field (if NULL, syndrome is calculated) * @no_eras: number of erasures * @eras_pos: position of erasures, can be NULL - * @invmsk: invert data mask (will be xored on data, not on parity!) + * @invmsk: invert data mask (will be xored on data, not on parity!) * @corr: buffer to store correction bitmask on eras_pos * * Each field in the data array contains up to symbol size bits of valid data. 
*/ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, - uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, + uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, uint16_t *corr) { #include "decode_rs.c" diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 42c08ef828c5..eddc9b3d3876 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -5,6 +5,7 @@ */ #include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/sched.h> unsigned int debug_smp_processor_id(void) { diff --git a/lib/sort.c b/lib/sort.c index ddc4d35df289..5f3b51ffa1dc 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sort.h> +#include <linux/slab.h> static void u32_swap(void *a, void *b, int size) { diff --git a/lib/string.c b/lib/string.c index d886ef157c12..037a48acedbb 100644 --- a/lib/string.c +++ b/lib/string.c @@ -36,11 +36,13 @@ int strnicmp(const char *s1, const char *s2, size_t len) /* Yes, Virginia, it had better be unsigned */ unsigned char c1, c2; - c1 = 0; c2 = 0; + c1 = c2 = 0; if (len) { do { - c1 = *s1; c2 = *s2; - s1++; s2++; + c1 = *s1; + c2 = *s2; + s1++; + s2++; if (!c1) break; if (!c2) @@ -55,7 +57,6 @@ int strnicmp(const char *s1, const char *s2, size_t len) } return (int)c1 - (int)c2; } - EXPORT_SYMBOL(strnicmp); #endif @@ -66,7 +67,7 @@ EXPORT_SYMBOL(strnicmp); * @src: Where to copy the string from */ #undef strcpy -char * strcpy(char * dest,const char *src) +char *strcpy(char *dest, const char *src) { char *tmp = dest; @@ -91,12 +92,13 @@ EXPORT_SYMBOL(strcpy); * count, the remainder of @dest will be padded with %NUL. * */ -char * strncpy(char * dest,const char *src,size_t count) +char *strncpy(char *dest, const char *src, size_t count) { char *tmp = dest; while (count) { - if ((*tmp = *src) != 0) src++; + if ((*tmp = *src) != 0) + src++; tmp++; count--; } @@ -122,7 +124,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) size_t ret = strlen(src); if (size) { - size_t len = (ret >= size) ? size-1 : ret; + size_t len = (ret >= size) ? size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } @@ -138,7 +140,7 @@ EXPORT_SYMBOL(strlcpy); * @src: The string to append to it */ #undef strcat -char * strcat(char * dest, const char * src) +char *strcat(char *dest, const char *src) { char *tmp = dest; @@ -146,7 +148,6 @@ char * strcat(char * dest, const char * src) dest++; while ((*dest++ = *src++) != '\0') ; - return tmp; } EXPORT_SYMBOL(strcat); @@ -162,7 +163,7 @@ EXPORT_SYMBOL(strcat); * Note that in contrast to strncpy, strncat ensures the result is * terminated. 
*/ -char * strncat(char *dest, const char *src, size_t count) +char *strncat(char *dest, const char *src, size_t count) { char *tmp = dest; @@ -176,7 +177,6 @@ char * strncat(char *dest, const char *src, size_t count) } } } - return tmp; } EXPORT_SYMBOL(strncat); @@ -216,15 +216,14 @@ EXPORT_SYMBOL(strlcat); * @ct: Another string */ #undef strcmp -int strcmp(const char * cs,const char * ct) +int strcmp(const char *cs, const char *ct) { - register signed char __res; + signed char __res; while (1) { if ((__res = *cs - *ct++) != 0 || !*cs++) break; } - return __res; } EXPORT_SYMBOL(strcmp); @@ -237,16 +236,15 @@ EXPORT_SYMBOL(strcmp); * @ct: Another string * @count: The maximum number of bytes to compare */ -int strncmp(const char * cs,const char * ct,size_t count) +int strncmp(const char *cs, const char *ct, size_t count) { - register signed char __res = 0; + signed char __res = 0; while (count) { if ((__res = *cs - *ct++) != 0 || !*cs++) break; count--; } - return __res; } EXPORT_SYMBOL(strncmp); @@ -258,12 +256,12 @@ EXPORT_SYMBOL(strncmp); * @s: The string to be searched * @c: The character to search for */ -char * strchr(const char * s, int c) +char *strchr(const char *s, int c) { - for(; *s != (char) c; ++s) + for (; *s != (char)c; ++s) if (*s == '\0') return NULL; - return (char *) s; + return (char *)s; } EXPORT_SYMBOL(strchr); #endif @@ -274,7 +272,7 @@ EXPORT_SYMBOL(strchr); * @s: The string to be searched * @c: The character to search for */ -char * strrchr(const char * s, int c) +char *strrchr(const char *s, int c) { const char *p = s + strlen(s); do { @@ -296,8 +294,8 @@ EXPORT_SYMBOL(strrchr); char *strnchr(const char *s, size_t count, int c) { for (; count-- && *s != '\0'; ++s) - if (*s == (char) c) - return (char *) s; + if (*s == (char)c) + return (char *)s; return NULL; } EXPORT_SYMBOL(strnchr); @@ -308,7 +306,7 @@ EXPORT_SYMBOL(strnchr); * strlen - Find the length of a string * @s: The string to be sized */ -size_t strlen(const char * s) +size_t strlen(const char *s) { const char *sc; @@ -325,7 +323,7 @@ EXPORT_SYMBOL(strlen); * @s: The string to be sized * @count: The maximum number of bytes to search */ -size_t strnlen(const char * s, size_t count) +size_t strnlen(const char *s, size_t count) { const char *sc; @@ -358,7 +356,6 @@ size_t strspn(const char *s, const char *accept) return count; ++count; } - return count; } @@ -384,9 +381,8 @@ size_t strcspn(const char *s, const char *reject) } ++count; } - return count; -} +} EXPORT_SYMBOL(strcspn); #ifndef __HAVE_ARCH_STRPBRK @@ -395,14 +391,14 @@ EXPORT_SYMBOL(strcspn); * @cs: The string to be searched * @ct: The characters to search for */ -char * strpbrk(const char * cs,const char * ct) +char *strpbrk(const char *cs, const char *ct) { - const char *sc1,*sc2; + const char *sc1, *sc2; - for( sc1 = cs; *sc1 != '\0'; ++sc1) { - for( sc2 = ct; *sc2 != '\0'; ++sc2) { + for (sc1 = cs; *sc1 != '\0'; ++sc1) { + for (sc2 = ct; *sc2 != '\0'; ++sc2) { if (*sc1 == *sc2) - return (char *) sc1; + return (char *)sc1; } } return NULL; @@ -422,9 +418,10 @@ EXPORT_SYMBOL(strpbrk); * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. * Same semantics, slimmer shape. 
;) */ -char * strsep(char **s, const char *ct) +char *strsep(char **s, const char *ct) { - char *sbegin = *s, *end; + char *sbegin = *s; + char *end; if (sbegin == NULL) return NULL; @@ -433,10 +430,8 @@ char * strsep(char **s, const char *ct) if (end) *end++ = '\0'; *s = end; - return sbegin; } - EXPORT_SYMBOL(strsep); #endif @@ -449,13 +444,12 @@ EXPORT_SYMBOL(strsep); * * Do not use memset() to access IO space, use memset_io() instead. */ -void * memset(void * s,int c,size_t count) +void *memset(void *s, int c, size_t count) { - char *xs = (char *) s; + char *xs = s; while (count--) *xs++ = c; - return s; } EXPORT_SYMBOL(memset); @@ -471,13 +465,13 @@ EXPORT_SYMBOL(memset); * You should not use this function to access IO space, use memcpy_toio() * or memcpy_fromio() instead. */ -void * memcpy(void * dest,const void *src,size_t count) +void *memcpy(void *dest, const void *src, size_t count) { - char *tmp = (char *) dest, *s = (char *) src; + char *tmp = dest; + char *s = src; while (count--) *tmp++ = *s++; - return dest; } EXPORT_SYMBOL(memcpy); @@ -492,23 +486,24 @@ EXPORT_SYMBOL(memcpy); * * Unlike memcpy(), memmove() copes with overlapping areas. */ -void * memmove(void * dest,const void *src,size_t count) +void *memmove(void *dest, const void *src, size_t count) { - char *tmp, *s; + char *tmp; + const char *s; if (dest <= src) { - tmp = (char *) dest; - s = (char *) src; + tmp = dest; + s = src; while (count--) *tmp++ = *s++; - } - else { - tmp = (char *) dest + count; - s = (char *) src + count; + } else { + tmp = dest; + tmp += count; + s = src; + s += count; while (count--) *--tmp = *--s; - } - + } return dest; } EXPORT_SYMBOL(memmove); @@ -522,12 +517,12 @@ EXPORT_SYMBOL(memmove); * @count: The size of the area. */ #undef memcmp -int memcmp(const void * cs,const void * ct,size_t count) +int memcmp(const void *cs, const void *ct, size_t count) { const unsigned char *su1, *su2; int res = 0; - for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) + for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) if ((res = *su1 - *su2) != 0) break; return res; @@ -545,17 +540,17 @@ EXPORT_SYMBOL(memcmp); * returns the address of the first occurrence of @c, or 1 byte past * the area if @c is not found */ -void * memscan(void * addr, int c, size_t size) +void *memscan(void *addr, int c, size_t size) { - unsigned char * p = (unsigned char *) addr; + unsigned char *p = addr; while (size) { if (*p == c) - return (void *) p; + return (void *)p; p++; size--; } - return (void *) p; + return (void *)p; } EXPORT_SYMBOL(memscan); #endif @@ -566,18 +561,18 @@ EXPORT_SYMBOL(memscan); * @s1: The string to be searched * @s2: The string to search for */ -char * strstr(const char * s1,const char * s2) +char *strstr(const char *s1, const char *s2) { int l1, l2; l2 = strlen(s2); if (!l2) - return (char *) s1; + return (char *)s1; l1 = strlen(s1); while (l1 >= l2) { l1--; - if (!memcmp(s1,s2,l2)) - return (char *) s1; + if (!memcmp(s1, s2, l2)) + return (char *)s1; s1++; } return NULL; @@ -600,7 +595,7 @@ void *memchr(const void *s, int c, size_t n) const unsigned char *p = s; while (n-- != 0) { if ((unsigned char)c == *p++) { - return (void *)(p-1); + return (void *)(p - 1); } } return NULL; diff --git a/lib/swiotlb.c b/lib/swiotlb.c new file mode 100644 index 000000000000..57216f3544ca --- /dev/null +++ b/lib/swiotlb.c @@ -0,0 +1,811 @@ +/* + * Dynamic DMA mapping support. 
+ * + * This implementation is for IA-64 and EM64T platforms that do not support + * I/O TLBs (aka DMA address translation hardware). + * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> + * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> + * Copyright (C) 2000, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * + * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. + * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid + * unnecessary i-cache flushing. + * 04/07/.. ak Better overflow handling. Assorted fixes. + * 05/09/10 linville Add support for syncing ranges, support syncing for + * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. + */ + +#include <linux/cache.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/ctype.h> + +#include <asm/io.h> +#include <asm/dma.h> +#include <asm/scatterlist.h> + +#include <linux/init.h> +#include <linux/bootmem.h> + +#define OFFSET(val,align) ((unsigned long) \ + ( (val) & ( (align) - 1))) + +#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) +#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) + +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value ? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define IO_TLB_SEGSIZE 128 + +/* + * log of the size of each IO TLB slab. The number of slabs is command line + * controllable. + */ +#define IO_TLB_SHIFT 11 + +#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) + +/* + * Minimum IO TLB size to bother booting with. Systems with mainly + * 64bit capable cards will only lightly use the swiotlb. If we can't + * allocate a contiguous 1MB, we're probably in trouble anyway. + */ +#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) + +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; + +int swiotlb_force; + +/* + * Used to do a quick range check in swiotlb_unmap_single and + * swiotlb_sync_single_*, to see if the memory was in fact allocated by this + * API. + */ +static char *io_tlb_start, *io_tlb_end; + +/* + * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and + * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. + */ +static unsigned long io_tlb_nslabs; + +/* + * When the IOMMU overflows we return a fallback buffer. This sets the size. + */ +static unsigned long io_tlb_overflow = 32*1024; + +void *io_tlb_overflow_buffer; + +/* + * This is a free list describing the number of free entries available from + * each index + */ +static unsigned int *io_tlb_list; +static unsigned int io_tlb_index; + +/* + * We need to save away the original address corresponding to a mapped entry + * for the sync operations. + */ +static unsigned char **io_tlb_orig_addr; + +/* + * Protect the above data structures in the map and unmap calls + */ +static DEFINE_SPINLOCK(io_tlb_lock); + +static int __init +setup_io_tlb_npages(char *str) +{ + if (isdigit(*str)) { + io_tlb_nslabs = simple_strtoul(str, &str, 0); + /* avoid tail segment of size < IO_TLB_SEGSIZE */ + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + if (*str == ',') + ++str; + if (!strcmp(str, "force")) + swiotlb_force = 1; + return 1; +} +__setup("swiotlb=", setup_io_tlb_npages); +/* make io_tlb_overflow tunable too? 
*/ + +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the software IO TLB used to implement the DMA API. + */ +void +swiotlb_init_with_default_size (size_t default_size) +{ + unsigned long i; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + /* + * Get IO TLB memory from the low pages + */ + io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * + (1 << IO_TLB_SHIFT), 0x100000000); + if (!io_tlb_start) + panic("Cannot allocate SWIOTLB buffer"); + io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end. + */ + io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); + for (i = 0; i < io_tlb_nslabs; i++) + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_index = 0; + io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); + + /* + * Get the overflow emergency buffer + */ + io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); + printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", + virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); +} + +void +swiotlb_init (void) +{ + swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ +} + +/* + * Systems with larger DMA zones (those that don't support ISA) can + * initialize the swiotlb later using the slab allocator if needed. + * This should be just like above, but with some error catching. + */ +int +swiotlb_late_init_with_default_size (size_t default_size) +{ + unsigned long i, req_nslabs = io_tlb_nslabs; + unsigned int order; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + /* + * Get IO TLB memory from the low pages + */ + order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); + io_tlb_nslabs = SLABS_PER_PAGE << order; + + while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { + io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, + order); + if (io_tlb_start) + break; + order--; + } + + if (!io_tlb_start) + goto cleanup1; + + if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { + printk(KERN_WARNING "Warning: only able to allocate %ld MB " + "for software IO TLB\n", (PAGE_SIZE << order) >> 20); + io_tlb_nslabs = SLABS_PER_PAGE << order; + } + io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); + memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end. 
+ */ + io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * sizeof(int))); + if (!io_tlb_list) + goto cleanup2; + + for (i = 0; i < io_tlb_nslabs; i++) + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_index = 0; + + io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * sizeof(char *))); + if (!io_tlb_orig_addr) + goto cleanup3; + + memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); + + /* + * Get the overflow emergency buffer + */ + io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, + get_order(io_tlb_overflow)); + if (!io_tlb_overflow_buffer) + goto cleanup4; + + printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " + "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20, + virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); + + return 0; + +cleanup4: + free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * + sizeof(char *))); + io_tlb_orig_addr = NULL; +cleanup3: + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * + sizeof(int))); + io_tlb_list = NULL; + io_tlb_end = NULL; +cleanup2: + free_pages((unsigned long)io_tlb_start, order); + io_tlb_start = NULL; +cleanup1: + io_tlb_nslabs = req_nslabs; + return -ENOMEM; +} + +static inline int +address_needs_mapping(struct device *hwdev, dma_addr_t addr) +{ + dma_addr_t mask = 0xffffffff; + /* If the device has a mask, use it, otherwise default to 32 bits */ + if (hwdev && hwdev->dma_mask) + mask = *hwdev->dma_mask; + return (addr & ~mask) != 0; +} + +/* + * Allocates bounce buffer and returns its kernel virtual address. + */ +static void * +map_single(struct device *hwdev, char *buffer, size_t size, int dir) +{ + unsigned long flags; + char *dma_addr; + unsigned int nslots, stride, index, wrap; + int i; + + /* + * For mappings greater than a page, we limit the stride (and + * hence alignment) to a page size. + */ + nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + if (size > PAGE_SIZE) + stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); + else + stride = 1; + + if (!nslots) + BUG(); + + /* + * Find suitable number of IO TLB entries size that will fit this + * request and allocate a buffer from that IO TLB pool. + */ + spin_lock_irqsave(&io_tlb_lock, flags); + { + wrap = index = ALIGN(io_tlb_index, stride); + + if (index >= io_tlb_nslabs) + wrap = index = 0; + + do { + /* + * If we find a slot that indicates we have 'nslots' + * number of contiguous buffers, we allocate the + * buffers from that slot and mark the entries as '0' + * indicating unavailable. + */ + if (io_tlb_list[index] >= nslots) { + int count = 0; + + for (i = index; i < (int) (index + nslots); i++) + io_tlb_list[i] = 0; + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); + + /* + * Update the indices to avoid searching in + * the next round. + */ + io_tlb_index = ((index + nslots) < io_tlb_nslabs + ? (index + nslots) : 0); + + goto found; + } + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + } while (index != wrap); + + spin_unlock_irqrestore(&io_tlb_lock, flags); + return NULL; + } + found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + + /* + * Save away the mapping from the original address to the DMA address. + * This is needed when we sync the memory. Then we sync the buffer if + * needed. 
+ */ + io_tlb_orig_addr[index] = buffer; + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + memcpy(dma_addr, buffer, size); + + return dma_addr; +} + +/* + * dma_addr is the kernel virtual address of the bounce buffer to unmap. + */ +static void +unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) +{ + unsigned long flags; + int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; + char *buffer = io_tlb_orig_addr[index]; + + /* + * First, sync the memory before unmapping the entry + */ + if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) + /* + * bounce... copy the data back into the original buffer * and + * delete the bounce buffer. + */ + memcpy(buffer, dma_addr, size); + + /* + * Return the buffer to the free list by setting the corresponding + * entries to indicate the number of contigous entries available. + * While returning the entries to the free list, we merge the entries + * with slots below and above the pool being returned. + */ + spin_lock_irqsave(&io_tlb_lock, flags); + { + count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? + io_tlb_list[index + nslots] : 0); + /* + * Step 1: return the slots to the free list, merging the + * slots with superceeding slots + */ + for (i = index + nslots - 1; i >= index; i--) + io_tlb_list[i] = ++count; + /* + * Step 2: merge the returned slots with the preceding slots, + * if available (non zero) + */ + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + } + spin_unlock_irqrestore(&io_tlb_lock, flags); +} + +static void +sync_single(struct device *hwdev, char *dma_addr, size_t size, + int dir, int target) +{ + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; + char *buffer = io_tlb_orig_addr[index]; + + switch (target) { + case SYNC_FOR_CPU: + if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) + memcpy(buffer, dma_addr, size); + else if (dir != DMA_TO_DEVICE) + BUG(); + break; + case SYNC_FOR_DEVICE: + if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + memcpy(dma_addr, buffer, size); + else if (dir != DMA_FROM_DEVICE) + BUG(); + break; + default: + BUG(); + } +} + +void * +swiotlb_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + unsigned long dev_addr; + void *ret; + int order = get_order(size); + + /* + * XXX fix me: the DMA API should pass us an explicit DMA mask + * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32 + * bit range instead of a 16MB one). + */ + flags |= GFP_DMA; + + ret = (void *)__get_free_pages(flags, order); + if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { + /* + * The allocated memory isn't reachable by the device. + * Fall back on swiotlb_map_single(). + */ + free_pages((unsigned long) ret, order); + ret = NULL; + } + if (!ret) { + /* + * We are either out of memory or the device can't DMA + * to GFP_DMA memory; fall back on + * swiotlb_map_single(), which will grab memory from + * the lowest available address range. 
+ */ + dma_addr_t handle; + handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); + if (dma_mapping_error(handle)) + return NULL; + + ret = phys_to_virt(handle); + } + + memset(ret, 0, size); + dev_addr = virt_to_phys(ret); + + /* Confirm address can be DMA'd by device */ + if (address_needs_mapping(hwdev, dev_addr)) { + printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", + (unsigned long long)*hwdev->dma_mask, dev_addr); + panic("swiotlb_alloc_coherent: allocated memory is out of " + "range for device"); + } + *dma_handle = dev_addr; + return ret; +} + +void +swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + if (!(vaddr >= (void *)io_tlb_start + && vaddr < (void *)io_tlb_end)) + free_pages((unsigned long) vaddr, get_order(size)); + else + /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ + swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); +} + +static void +swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) +{ + /* + * Ran out of IOMMU space for this operation. This is very bad. + * Unfortunately the drivers cannot handle this operation properly. + * unless they check for dma_mapping_error (most don't) + * When the mapping is small enough return a static buffer to limit + * the damage, or panic when the transfer is too big. + */ + printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " + "device %s\n", size, dev ? dev->bus_id : "?"); + + if (size > io_tlb_overflow && do_panic) { + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + panic("DMA: Memory would be corrupted\n"); + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + panic("DMA: Random memory would be DMAed\n"); + } +} + +/* + * Map a single buffer of the indicated size for DMA in streaming mode. The + * physical address to use is returned. + * + * Once the device is given the dma address, the device owns this memory until + * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. + */ +dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +{ + unsigned long dev_addr = virt_to_phys(ptr); + void *map; + + if (dir == DMA_NONE) + BUG(); + /* + * If the pointer passed in happens to be in the device's DMA window, + * we can safely return the device addr and not worry about bounce + * buffering it. + */ + if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) + return dev_addr; + + /* + * Oh well, have to allocate and map a bounce buffer. + */ + map = map_single(hwdev, ptr, size, dir); + if (!map) { + swiotlb_full(hwdev, size, dir, 1); + map = io_tlb_overflow_buffer; + } + + dev_addr = virt_to_phys(map); + + /* + * Ensure that the address returned is DMA'ble + */ + if (address_needs_mapping(hwdev, dev_addr)) + panic("map_single: bounce buffer is not DMA'ble"); + + return dev_addr; +} + +/* + * Since DMA is i-cache coherent, any (complete) pages that were written via + * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to + * flush them when they get mapped into an executable vm-area. + */ +static void +mark_clean(void *addr, size_t size) +{ + unsigned long pg_addr, end; + + pg_addr = PAGE_ALIGN((unsigned long) addr); + end = (unsigned long) addr + size; + while (pg_addr + PAGE_SIZE <= end) { + struct page *page = virt_to_page(pg_addr); + set_bit(PG_arch_1, &page->flags); + pg_addr += PAGE_SIZE; + } +} + +/* + * Unmap a single streaming mode DMA translation. 
+ * The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+ int dir)
+{
+ char *dma_addr = phys_to_virt(dev_addr);
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ unmap_single(hwdev, dma_addr, size, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to tear down the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
+ */
+static inline void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir, int target)
+{
+ char *dma_addr = phys_to_virt(dev_addr);
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ sync_single(hwdev, dma_addr, size, dir, target);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+/*
+ * Same as above, but for a sub-range of the mapping.
+ */
+static inline void
+swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size,
+ int dir, int target)
+{
+ char *dma_addr = phys_to_virt(dev_addr) + offset;
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ sync_single(hwdev, dma_addr, size, dir, target);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir)
+{
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+ SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir)
+{
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+ SYNC_FOR_DEVICE);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_single
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * same here.
+ */
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
+{
+ void *addr;
+ unsigned long dev_addr;
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ dev_addr = virt_to_phys(addr);
+ if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
+ if (!sg->dma_address) {
+ /* Don't panic here, we expect map_sg users
+ to do proper error handling. */
+ swiotlb_full(hwdev, sg->length, dir, 0);
+ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
+ sg[0].dma_length = 0;
+ return 0;
+ }
+ } else
+ sg->dma_address = dev_addr;
+ sg->dma_length = sg->length;
+ }
+ return nelems;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_single() above.
+ */
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
+{
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+static inline void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir, int target)
+{
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ sync_single(hwdev, (void *) sg->dma_address,
+ sg->dma_length, dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
+{
+ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
+{
+ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
+int
+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+}
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */ +int +swiotlb_dma_supported (struct device *hwdev, u64 mask) +{ + return (virt_to_phys (io_tlb_end) - 1) <= mask; +} + +EXPORT_SYMBOL(swiotlb_init); +EXPORT_SYMBOL(swiotlb_map_single); +EXPORT_SYMBOL(swiotlb_unmap_single); +EXPORT_SYMBOL(swiotlb_map_sg); +EXPORT_SYMBOL(swiotlb_unmap_sg); +EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); +EXPORT_SYMBOL(swiotlb_sync_single_for_device); +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); +EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); +EXPORT_SYMBOL(swiotlb_sync_sg_for_device); +EXPORT_SYMBOL(swiotlb_dma_mapping_error); +EXPORT_SYMBOL(swiotlb_alloc_coherent); +EXPORT_SYMBOL(swiotlb_free_coherent); +EXPORT_SYMBOL(swiotlb_dma_supported); diff --git a/lib/textsearch.c b/lib/textsearch.c index 1e934c196f0f..6f3093efbd7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf, * parameters or a ERR_PTR(). */ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, - unsigned int len, int gfp_mask, int flags) + unsigned int len, gfp_t gfp_mask, int flags) { int err = -ENOENT; struct ts_config *conf; diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 2cc79112ecc3..8a8b3a16133e 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, } static struct ts_config *bm_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { struct ts_config *conf; struct ts_bm *bm; diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index d27c0a072940..ca3211206eef 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c @@ -258,7 +258,7 @@ found_match: } static struct ts_config *fsm_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { int i, err = -EINVAL; struct ts_config *conf; diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 73266b975585..7fd45451b44a 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c @@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, } static struct ts_config *kmp_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { struct ts_config *conf; struct ts_kmp *kmp; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e4e9031dd9c3..b07db5ca3f66 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -23,6 +23,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> +#include <asm/page.h> /* for PAGE_SIZE */ #include <asm/div64.h> /** diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 3d94cb90c1d3..31b9e9054bf7 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c @@ -3,7 +3,6 @@ * For conditions of distribution and use, see copyright notice in zlib.h */ -#include <linux/module.h> #include <linux/zutil.h> #include "infblock.h" #include "infutil.h" |
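Editor's note: the streaming-DMA entry points added above (swiotlb_map_single, swiotlb_sync_single_for_cpu/for_device, swiotlb_unmap_single, swiotlb_dma_mapping_error) follow the map / sync / unmap ownership rules spelled out in the in-code comments. The sketch below is not part of this commit; it only illustrates the intended calling sequence for a receive-style transfer. The device pointer, buffer, length and function name are hypothetical, and real drivers would normally go through the generic dma_map_single()/dma_sync_*() wrappers, which an architecture may route to these swiotlb implementations.

/*
 * Illustrative sketch only -- not part of this commit.  'dev', 'buf' and
 * 'len' are placeholders; 'buf' must be a kernel direct-mapped (lowmem)
 * buffer, since the swiotlb code translates it with virt_to_phys().
 * The swiotlb_* prototypes live in an architecture header (location varies).
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_receive(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* Hand the buffer to the device; this may bounce through the IO TLB. */
	bus = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (swiotlb_dma_mapping_error(bus))
		return -ENOMEM;	/* only the overflow buffer was available */

	/* ... program the device to DMA into 'bus' and wait for completion ... */

	/* Make the device's writes visible to the CPU before touching 'buf'. */
	swiotlb_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	/* ... inspect 'buf' here ... */

	/* If the device is to DMA into the buffer again, hand it back first. */
	swiotlb_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);

	/* When no more transfers are expected, tear the mapping down. */
	swiotlb_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
	return 0;
}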
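A second hedged sketch, again not part of the commit, covers the coherent-allocation path and the addressing-mask check: swiotlb_dma_supported() simply tests whether the mask covers the end of the bounce pool, and swiotlb_alloc_coherent() returns zeroed, device-reachable memory, bouncing through the IO TLB if the page allocator cannot satisfy the request. The device pointer, the 24-bit mask (echoing the example in the swiotlb_dma_supported() comment) and the helper name are hypothetical.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_coherent_alloc(struct device *dev, size_t len,
				    dma_addr_t *bus)
{
	/* A device that can only drive 24 address bits would pass 0x00ffffff. */
	if (!swiotlb_dma_supported(dev, 0x00ffffff))
		return NULL;	/* mask cannot even reach the bounce pool */

	/* Zeroed memory the device can address; *bus receives the bus address. */
	return swiotlb_alloc_coherent(dev, len, bus, GFP_KERNEL);
}

/* The matching release uses the same size and handle:
 *	swiotlb_free_coherent(dev, len, vaddr, *bus);
 */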