Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                          3
-rw-r--r--  lib/assoc_array.c                    4
-rw-r--r--  lib/bitmap.c                         4
-rw-r--r--  lib/dma-debug.c                      2
-rw-r--r--  lib/dynamic_debug.c                 50
-rw-r--r--  lib/genalloc.c                       1
-rw-r--r--  lib/hweight.c                        4
-rw-r--r--  lib/idr.c                            2
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c    103
-rw-r--r--  lib/percpu-refcount.c               16
-rw-r--r--  lib/rhashtable.c                    13
-rw-r--r--  lib/string.c                         4
-rw-r--r--  lib/vsprintf.c                       2
13 files changed, 115 insertions, 93 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
config ARCH_USE_CMPXCHG_LOCKREF
bool
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ae146f0734eb..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
- BUG_ON(!ptr);
+ BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1e031f2c9aba..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
- * using the the map { <n, m> | the n-th bit of @relmap is the
+ * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
@@ -932,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
*
* Further lets say we use the following code, invoking
* bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
if (object_is_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from "
"stack [addr=%p]\n", addr);
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c9afbe2c445a..31fe79e31ab8 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
-int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
- int res;
struct va_format vaf;
char buf[PREFIX_SIZE];
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- res = printk(KERN_DEBUG "%s%pV",
- dynamic_emit_prefix(descriptor, buf), &vaf);
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
-int __dynamic_dev_dbg(struct _ddebug *descriptor,
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
const struct device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (!dev) {
- res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev, "%s%s %s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev), dev_name(dev),
- &vaf);
+ dev_printk_emit(7, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_dev_dbg);
#ifdef CONFIG_NET
-int __dynamic_netdev_dbg(struct _ddebug *descriptor,
- const struct net_device *dev, const char *fmt, ...)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int res;
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -613,23 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
if (dev && dev->dev.parent) {
char buf[PREFIX_SIZE];
- res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s%s: %pV",
- dynamic_emit_prefix(descriptor, buf),
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), netdev_reg_state(dev),
- &vaf);
+ dev_printk_emit(7, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
- netdev_reg_state(dev), &vaf);
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
- res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
va_end(args);
-
- return res;
}
EXPORT_SYMBOL(__dynamic_netdev_dbg);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..38d2db82228c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
+ of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
unsigned int __sw_hweight32(unsigned int w)
{
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
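For context on what the new CONFIG_ARCH_HAS_FAST_MULTIPLIER symbol gates: the multiplier path collapses the per-byte partial counts with one multiply instead of further shift/add folding. A minimal standalone sketch of the 32-bit case, mirroring __sw_hweight32 above but under a hypothetical demo_hweight32 name, could look like this (an architecture with a fast multiplier would then select ARCH_HAS_FAST_MULTIPLIER from its Kconfig rather than defining the old bare macro):

/* SWAR population count: each step folds adjacent bit counts; the final
 * multiply by 0x01010101 sums the four byte counts into the top byte,
 * which the shift by 24 extracts. */
static unsigned int demo_hweight32(unsigned int w)
{
	w -= (w >> 1) & 0x55555555;                      /* 2-bit counts  */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit counts  */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* byte counts   */
	return (w * 0x01010101) >> 24;                   /* sum of bytes  */
}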
diff --git a/lib/idr.c b/lib/idr.c
index 50be3fa9b657..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -626,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
* idr_destroy().
*
* A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
* free up the id mappings and cached idr_layers.
*/
void idr_destroy(struct idr *idp)
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 8563081e8da3..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,31 +19,21 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(t, x) \
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
-#define HAVE_OP(t, x) \
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
- (((t + x) >= t) && ((t + x) >= x)))
-
-#define NEED_IP(t, x) \
- do { \
- if (!HAVE_IP(t, x)) \
- goto input_overrun; \
- } while (0)
-
-#define NEED_OP(t, x) \
- do { \
- if (!HAVE_OP(t, x)) \
- goto output_overrun; \
- } while (0)
-
-#define TEST_LB(m_pos) \
- do { \
- if ((m_pos) < out) \
- goto lookbehind_overrun; \
- } while (0)
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two less 255 steps. See Documentation/lzo.txt for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
if (t < 16) {
if (likely(state == 0)) {
if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 15 + *ip++;
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -101,8 +98,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t, 0);
- NEED_IP(t, 3);
+ NEED_OP(t);
+ NEED_IP(t + 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2, 0);
+ NEED_OP(2);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
} else if (t >= 32) {
t = (t & 31) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 31 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
+ NEED_IP(2);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
m_pos -= (t & 8) << 11;
t = (t & 7) + (3 - 1);
if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
while (unlikely(*ip == 0)) {
- t += 255;
ip++;
- NEED_IP(1, 0);
+ NEED_IP(1);
}
- t += 7 + *ip++;
- NEED_IP(2, 0);
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t, 15))) {
+ if (likely(HAVE_OP(t + 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6, 0)) {
+ if (HAVE_IP(6)) {
state = next;
COPY4(op, ip);
op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t, 0);
+ NEED_OP(t);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t, 0);
+ NEED_OP(t);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -214,15 +225,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t, 3);
- NEED_OP(t, 0);
+ NEED_IP(t + 3);
+ NEED_OP(t);
while (t > 0) {
*op++ = *ip++;
t--;
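To illustrate what the reworked counter handling above guards against: the old code added 255 to t for every zero byte in a run, so a crafted stream of zeros could overflow the counter. The new code counts the zero bytes first, rejects runs longer than MAX_255_COUNT, and only then scales by 255 (the (offset << 8) - offset form). A minimal sketch of that pattern outside the decompressor, with a hypothetical decode_run_len helper and no kernel headers, might be:

#include <stddef.h>

#define MAX_255_COUNT  ((((size_t)~0) / 255) - 2)

/* Returns the decoded extra length, or (size_t)-1 on error.  'pp' points
 * at the first byte of the zero run and is advanced past the run on
 * success; 'end' points just past the input buffer. */
static size_t decode_run_len(const unsigned char **pp, const unsigned char *end)
{
	const unsigned char *p = *pp, *start = p;
	size_t zeros;

	while (p < end && *p == 0)          /* count the zero bytes first */
		p++;
	if (p >= end)
		return (size_t)-1;           /* input overrun */

	zeros = (size_t)(p - start);
	if (zeros > MAX_255_COUNT)           /* would overflow: reject */
		return (size_t)-1;

	*pp = p + 1;
	/* zeros * 255, written as (zeros << 8) - zeros like the patch */
	return ((zeros << 8) - zeros) + *p;
}

The real decompressor additionally adds the base count (15, 31 or 7 depending on the state); this sketch only shows the overflow-safe accumulation.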
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..a89cf09a8268 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18
+ * devel cycle. Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+ WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+ "percpu_ref_kill() called more than once on %pf!",
+ ref->release);
+
+ ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+ synchronize_sched_expedited();
+ percpu_ref_kill_rcu(&ref->rcu);
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc1..3d2b4733f6cb 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
-#include <linux/log2.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4UL
@@ -55,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
/**
* rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @key: pointer to key
* @len: length of key
*
@@ -86,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
/**
* rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
* @ptr: pointer to hashed object
*
* Computes the hash value using the hash function `hashfn` respectively
@@ -589,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* rhashtable_destroy - destroy hash table
* @ht: the hash table to destroy
*
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
*/
void rhashtable_destroy(const struct rhashtable *ht)
{
- const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
- bucket_table_free(tbl);
+ bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
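The updated comment shifts the RCU obligation onto the caller: unpublish the table, wait out a grace period, then destroy. A caller-side sketch of that ordering, using hypothetical my_objects/active_set names and assuming readers run under rcu_read_lock(), might look like:

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_objects {                        /* hypothetical rhashtable user */
	struct rhashtable ht;
	/* ... */
};

static struct my_objects __rcu *active_set;    /* the published pointer */

static void my_objects_teardown(struct my_objects *set)
{
	rcu_assign_pointer(active_set, NULL);  /* unpublish: no new lookups  */
	synchronize_rcu();                     /* wait out the quiescent     */
	                                       /* cycle the comment requires */
	rhashtable_destroy(&set->ht);          /* buckets can be freed now   */
	kfree(set);
}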
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..f3c6ff596414 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ba3cd0a35640 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1937,7 +1937,7 @@ EXPORT_SYMBOL(sprintf);
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
- * is skiped.
+ * is skipped.
*
* The return value is the number of words(32bits) which would be generated for
* the given input.