Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            |   3
-rw-r--r--  lib/Kconfig.debug      |  44
-rw-r--r--  lib/Makefile           |   6
-rw-r--r--  lib/atomic64.c         |  66
-rw-r--r--  lib/dynamic_debug.c    | 174
-rw-r--r--  lib/hexdump.c          |  15
-rw-r--r--  lib/idr.c              |   2
-rw-r--r--  lib/kobject_uevent.c   |   2
-rw-r--r--  lib/llist.c            |  74
-rw-r--r--  lib/percpu_counter.c   |  18
-rw-r--r--  lib/proportions.c      |  12
-rw-r--r--  lib/ratelimit.c        |   4
-rw-r--r--  lib/rwsem-spinlock.c   |  38
-rw-r--r--  lib/rwsem.c            |  14
-rw-r--r--  lib/sha1.c             |   1
-rw-r--r--  lib/smp_processor_id.c |   2
-rw-r--r--  lib/xz/xz_dec_bcj.c    |  27
17 files changed, 271 insertions(+), 231 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c695ff9caba..32f3e5ae2be5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,7 +276,4 @@ config CORDIC
so its calculations are in fixed point. Modules can select this
when they require this function. Module will be called cordic.
-config LLIST
- bool
-
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46d..75330bd87565 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -117,31 +117,31 @@ config DEBUG_SECTION_MISMATCH
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
- Linux will during link or during runtime drop some sections
- and any use of code/data previously in these sections will
+ During linktime or runtime, some sections are dropped;
+ any use of code/data previously in these sections would
most likely result in an oops.
- In the code functions and variables are annotated with
- __init, __devinit etc. (see full list in include/linux/init.h)
+ In the code, functions and variables are annotated with
+ __init, __devinit, etc. (see the full list in include/linux/init.h),
which results in the code/data being placed in specific sections.
- The section mismatch analysis is always done after a full
- kernel build but enabling this option will in addition
- do the following:
- - Add the option -fno-inline-functions-called-once to gcc
- When inlining a function annotated __init in a non-init
- function we would lose the section information and thus
+ The section mismatch analysis is always performed after a full
+ kernel build, and enabling this option causes the following
+ additional steps to occur:
+ - Add the option -fno-inline-functions-called-once to gcc commands.
+ When inlining a function annotated with __init in a non-init
+ function, we would lose the section information and thus
the analysis would not catch the illegal reference.
- This option tells gcc to inline less but will also
- result in a larger kernel.
- - Run the section mismatch analysis for each module/built-in.o
- When we run the section mismatch analysis on vmlinux.o we
+ This option tells gcc to inline less (but it does result in
+ a larger kernel).
+ - Run the section mismatch analysis for each module/built-in.o file.
+ When we run the section mismatch analysis on vmlinux.o, we
lose valuable information about where the mismatch was
introduced.
Running the analysis for each module/built-in.o file
- will tell where the mismatch happens much closer to the
- source. The drawback is that we will report the same
- mismatch at least twice.
- - Enable verbose reporting from modpost to help solving
- the section mismatches reported.
+ tells where the mismatch happens much closer to the
+ source. The drawback is that the same mismatch is
+ reported at least twice.
+ - Enable verbose reporting from modpost in order to help resolve
+ the section mismatches that are reported.
config DEBUG_KERNEL
bool "Kernel debugging"
@@ -835,7 +835,7 @@ config DEBUG_CREDENTIALS
#
# Select this config option from the architecture Kconfig, if it
-# it is preferred to always offer frame pointers as a config
+# is preferred to always offer frame pointers as a config
# option on the architecture (regardless of KERNEL_DEBUG):
#
config ARCH_WANT_FRAME_POINTERS
@@ -1081,7 +1081,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
help
Provide stacktrace filter for fault-injection capabilities
@@ -1091,7 +1091,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
diff --git a/lib/Makefile b/lib/Makefile
index d5d175c8a6ca..a4da283f5dc0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o show_mem.o \
- is_single_threaded.o plist.o decompress.o find_next_bit.o
+ is_single_threaded.o plist.o decompress.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
- bsearch.o find_last_bit.o
+ bsearch.o find_last_bit.o find_next_bit.o llist.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
-obj-$(CONFIG_LLIST) += llist.o
-
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a8..3975470caf4f 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- spinlock_t lock;
+ raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter = i;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
long long atomic64_add_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
void atomic64_sub(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
long long atomic64_sub_return(long long a, atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
int ret = 0;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
ret = 1;
}
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
int i;
for (i = 0; i < NR_LOCKS; ++i)
- spin_lock_init(&atomic64_lock[i].lock);
+ raw_spin_lock_init(&atomic64_lock[i].lock);
return 0;
}
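The whole file is converted mechanically: raw_spinlock_t stays a true spinning lock even on preempt-rt configurations, where plain spinlock_t can sleep, so the atomic64 emulation remains usable from any context. A minimal sketch of the same conversion pattern, with hypothetical names (my_lock, my_counter, my_counter_add):

    #include <linux/spinlock.h>

    /* DEFINE_RAW_SPINLOCK keeps this a spinning lock on all configs */
    static DEFINE_RAW_SPINLOCK(my_lock);
    static long long my_counter;

    static void my_counter_add(long long a)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&my_lock, flags); /* safe in irq context */
            my_counter += a;
            raw_spin_unlock_irqrestore(&my_lock, flags);
    }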
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 75ca78f3a8c9..dcdade39e47f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -10,11 +10,12 @@
* Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
@@ -30,6 +31,8 @@
#include <linux/jump_label.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
@@ -38,7 +41,6 @@ struct ddebug_table {
struct list_head link;
char *mod_name;
unsigned int num_ddebugs;
- unsigned int num_enabled;
struct _ddebug *ddebugs;
};
@@ -148,19 +150,13 @@ static void ddebug_change(const struct ddebug_query *query,
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
-
- if (!newflags)
- dt->num_enabled--;
- else if (!dp->flags)
- dt->num_enabled++;
dp->flags = newflags;
if (newflags)
dp->enabled = 1;
else
dp->enabled = 0;
if (verbose)
- printk(KERN_INFO
- "ddebug: changed %s:%d [%s]%s %s\n",
+ pr_info("changed %s:%d [%s]%s %s\n",
dp->filename, dp->lineno,
dt->mod_name, dp->function,
ddebug_describe_flags(dp, flagbuf,
@@ -170,7 +166,7 @@ static void ddebug_change(const struct ddebug_query *query,
mutex_unlock(&ddebug_lock);
if (!nfound && verbose)
- printk(KERN_INFO "ddebug: no matches for query\n");
+ pr_info("no matches for query\n");
}
/*
@@ -215,10 +211,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
if (verbose) {
int i;
- printk(KERN_INFO "%s: split into words:", __func__);
+ pr_info("split into words:");
for (i = 0 ; i < nwords ; i++)
- printk(" \"%s\"", words[i]);
- printk("\n");
+ pr_cont(" \"%s\"", words[i]);
+ pr_cont("\n");
}
return nwords;
@@ -330,16 +326,15 @@ static int ddebug_parse_query(char *words[], int nwords,
}
} else {
if (verbose)
- printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
- __func__, words[i]);
+ pr_err("unknown keyword \"%s\"\n", words[i]);
return -EINVAL;
}
}
if (verbose)
- printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
- "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
- __func__, query->function, query->filename,
+ pr_info("q->function=\"%s\" q->filename=\"%s\" "
+ "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
+ query->function, query->filename,
query->module, query->format, query->first_lineno,
query->last_lineno);
@@ -368,7 +363,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
return -EINVAL;
}
if (verbose)
- printk(KERN_INFO "%s: op='%c'\n", __func__, op);
+ pr_info("op='%c'\n", op);
for ( ; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -383,7 +378,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
if (flags == 0)
return -EINVAL;
if (verbose)
- printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
+ pr_info("flags=0x%x\n", flags);
/* calculate final *flagsp, *maskp according to mask and op */
switch (op) {
@@ -401,8 +396,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
break;
}
if (verbose)
- printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
- __func__, *flagsp, *maskp);
+ pr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
return 0;
}
@@ -427,40 +421,117 @@ static int ddebug_exec_query(char *query_string)
return 0;
}
+#define PREFIX_SIZE 64
+
+static int remaining(int wrote)
+{
+ if (PREFIX_SIZE - wrote > 0)
+ return PREFIX_SIZE - wrote;
+ return 0;
+}
+
+static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+ int pos_after_tid;
+ int pos = 0;
+
+ pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+ if (in_interrupt())
+ pos += snprintf(buf + pos, remaining(pos), "%s ",
+ "<intr>");
+ else
+ pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+ task_pid_vnr(current));
+ }
+ pos_after_tid = pos;
+ if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->modname);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+ pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno);
+ if (pos - pos_after_tid)
+ pos += snprintf(buf + pos, remaining(pos), " ");
+ if (pos >= PREFIX_SIZE)
+ buf[PREFIX_SIZE - 1] = '\0';
+
+ return buf;
+}
+
int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
int res;
+ struct va_format vaf;
+ char buf[PREFIX_SIZE];
BUG_ON(!descriptor);
BUG_ON(!fmt);
va_start(args, fmt);
- res = printk(KERN_DEBUG);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
- if (in_interrupt())
- res += printk(KERN_CONT "<intr> ");
- else
- res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
- }
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
- res += printk(KERN_CONT "%s:", descriptor->modname);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
- res += printk(KERN_CONT "%s:", descriptor->function);
- if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
- res += printk(KERN_CONT "%d ", descriptor->lineno);
- res += vprintk(fmt, args);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
va_end(args);
return res;
}
EXPORT_SYMBOL(__dynamic_pr_debug);
+int __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int res;
+ char buf[PREFIX_SIZE];
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+ va_end(args);
+
+ return res;
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
static __initdata char ddebug_setup_string[1024];
static __init int ddebug_setup_query(char *str)
{
if (strlen(str) >= 1024) {
- pr_warning("ddebug boot param string too large\n");
+ pr_warn("ddebug boot param string too large\n");
return 0;
}
strcpy(ddebug_setup_string, str);
@@ -488,8 +559,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
return -EFAULT;
tmpbuf[len] = '\0';
if (verbose)
- printk(KERN_INFO "%s: read %d bytes from userspace\n",
- __func__, (int)len);
+ pr_info("read %d bytes from userspace\n", (int)len);
ret = ddebug_exec_query(tmpbuf);
if (ret)
@@ -552,8 +622,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
int n = *pos;
if (verbose)
- printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
- __func__, m, (unsigned long long)*pos);
+ pr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
mutex_lock(&ddebug_lock);
@@ -578,8 +647,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
struct _ddebug *dp;
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
- __func__, m, p, (unsigned long long)*pos);
+ pr_info("called m=%p p=%p *pos=%lld\n",
+ m, p, (unsigned long long)*pos);
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
@@ -602,8 +671,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
char flagsbuf[8];
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -628,8 +696,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
if (verbose)
- printk(KERN_INFO "%s: called m=%p p=%p\n",
- __func__, m, p);
+ pr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@ -652,7 +719,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
int err;
if (verbose)
- printk(KERN_INFO "%s: called\n", __func__);
+ pr_info("called\n");
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (iter == NULL)
@@ -696,7 +763,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
}
dt->mod_name = new_name;
dt->num_ddebugs = n;
- dt->num_enabled = 0;
dt->ddebugs = tab;
mutex_lock(&ddebug_lock);
@@ -704,8 +770,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
mutex_unlock(&ddebug_lock);
if (verbose)
- printk(KERN_INFO "%u debug prints in module %s\n",
- n, dt->mod_name);
+ pr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
EXPORT_SYMBOL_GPL(ddebug_add_module);
@@ -727,8 +792,7 @@ int ddebug_remove_module(const char *mod_name)
int ret = -ENOENT;
if (verbose)
- printk(KERN_INFO "%s: removing module \"%s\"\n",
- __func__, mod_name);
+ pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -804,8 +868,8 @@ static int __init dynamic_debug_init(void)
if (ddebug_setup_string[0] != '\0') {
ret = ddebug_exec_query(ddebug_setup_string);
if (ret)
- pr_warning("Invalid ddebug boot param %s",
- ddebug_setup_string);
+ pr_warn("Invalid ddebug boot param %s",
+ ddebug_setup_string);
else
pr_info("ddebug initialized with string %s",
ddebug_setup_string);
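All three new emitters share one trick: the prefix is rendered into a small stack buffer by dynamic_emit_prefix(), and the caller's format plus va_list travels through %pV, so prefix and message land in a single printk() call and cannot interleave with output from other CPUs. A reduced sketch of that pattern, with a hypothetical wrapper name:

    static int my_prefixed_printk(const char *prefix, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;
            int res;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            /* %pV expands the whole (fmt, args) pair inside one printk */
            res = printk("%s%pV", prefix, &vaf);
            va_end(args);

            return res;
    }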
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f5fe6ba7a3ab..51d5ae210244 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin);
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
+ *
+ * Return 0 on success, -1 in case of bad input.
*/
-void hex2bin(u8 *dst, const char *src, size_t count)
+int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- *dst = hex_to_bin(*src++) << 4;
- *dst += hex_to_bin(*src++);
- dst++;
+ int hi = hex_to_bin(*src++);
+ int lo = hex_to_bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+
+ *dst++ = (hi << 4) | lo;
}
+ return 0;
}
EXPORT_SYMBOL(hex2bin);
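With the new return value, callers can reject malformed input instead of consuming garbage; a hypothetical updated caller:

    static int parse_hex_key(const char *str, u8 *key, size_t keylen)
    {
            /* hex2bin() now reports any non-hex character via -1 */
            if (hex2bin(key, str, keylen) < 0)
                    return -EINVAL;

            return 0;
    }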
diff --git a/lib/idr.c b/lib/idr.c
index db040ce3fa73..5acf9bb10968 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -860,7 +860,7 @@ EXPORT_SYMBOL(ida_get_new_above);
* and go back to the idr_pre_get() call. If the idr is full, it will
* return %-ENOSPC.
*
- * @id returns a value in the range %0 ... %0x7fffffff.
+ * @p_id returns a value in the range %0 ... %0x7fffffff.
*/
int ida_get_new(struct ida *ida, int *p_id)
{
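For reference, the calling convention that comment describes is the usual pre-get/retry loop; a sketch with hypothetical names (my_ida, my_get_id):

    static DEFINE_IDA(my_ida);

    static int my_get_id(void)
    {
            int id, err;

            do {
                    if (!ida_pre_get(&my_ida, GFP_KERNEL))
                            return -ENOMEM;
                    err = ida_get_new(&my_ida, &id);
            } while (err == -EAGAIN);

            return err ? err : id; /* id lies in 0 ... 0x7fffffff */
    }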
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c0..ad72a03ce5e9 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
kobj_bcast_filter,
kobj);
/* ENOBUFS should be handled in userspace */
- if (retval == -ENOBUFS)
+ if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
} else
retval = -ENOMEM;
diff --git a/lib/llist.c b/lib/llist.c
index da445724fa1f..700cff77a387 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -3,8 +3,8 @@
*
* The basic atomic operation of this list is cmpxchg on long. On
* architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler. So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ * list can NOT be used in NMI handlers. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
@@ -30,48 +30,28 @@
#include <asm/system.h>
/**
- * llist_add - add a new entry
- * @new: new entry to be added
- * @head: the head for your lock-less list
- */
-void llist_add(struct llist_node *new, struct llist_head *head)
-{
- struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- entry = head->first;
- do {
- old_entry = entry;
- new->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add);
-
-/**
* llist_add_batch - add several linked entries in batch
* @new_first: first entry in batch to be added
* @new_last: last entry in batch to be added
* @head: the head for your lock-less list
+ *
+ * Return whether list is empty before adding.
*/
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_head *head)
{
struct llist_node *entry, *old_entry;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
entry = head->first;
- do {
+ for (;;) {
old_entry = entry;
new_last->next = entry;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+ entry = cmpxchg(&head->first, old_entry, new_first);
+ if (entry == old_entry)
+ break;
+ }
+
+ return old_entry == NULL;
}
EXPORT_SYMBOL_GPL(llist_add_batch);
@@ -93,37 +73,17 @@ struct llist_node *llist_del_first(struct llist_head *head)
{
struct llist_node *entry, *old_entry, *next;
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
entry = head->first;
- do {
+ for (;;) {
if (entry == NULL)
return NULL;
old_entry = entry;
next = entry->next;
- cpu_relax();
- } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+ entry = cmpxchg(&head->first, old_entry, next);
+ if (entry == old_entry)
+ break;
+ }
return entry;
}
EXPORT_SYMBOL_GPL(llist_del_first);
-
-/**
- * llist_del_all - delete all entries from lock-less list
- * @head: the head of lock-less list to delete all entries
- *
- * If list is empty, return NULL, otherwise, delete all entries and
- * return the pointer to the first entry. The order of entries
- * deleted is from the newest to the oldest added one.
- */
-struct llist_node *llist_del_all(struct llist_head *head)
-{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
- BUG_ON(in_nmi());
-#endif
-
- return xchg(&head->first, NULL);
-}
-EXPORT_SYMBOL_GPL(llist_del_all);
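The new boolean return lets exactly one producer, the one that flips the list from empty to non-empty, pay for notifying the consumer; the llist_add() wrapper in llist.h is assumed to forward the same value. A hedged sketch with hypothetical names (my_work, my_pending, my_kick_consumer):

    struct my_work {
            struct llist_node node;
            /* ... payload ... */
    };

    static LLIST_HEAD(my_pending);

    extern void my_kick_consumer(void); /* hypothetical wakeup primitive */

    static void my_queue_work(struct my_work *w)
    {
            /* true only when the list was empty: one wakeup per batch */
            if (llist_add(&w->node, &my_pending))
                    my_kick_consumer();
    }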
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) {
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_write(*fbc->counters, 0);
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
} else {
__this_cpu_write(*fbc->counters, count);
}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
- spin_lock(&fbc->lock);
+ raw_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- spin_unlock(&fbc->lock);
+ raw_spin_unlock(&fbc->lock);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key)
{
- spin_lock_init(&fbc->lock);
+ raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
s32 *pcount;
unsigned long flags;
- spin_lock_irqsave(&fbc->lock, flags);
+ raw_spin_lock_irqsave(&fbc->lock, flags);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- spin_unlock_irqrestore(&fbc->lock, flags);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
mutex_unlock(&percpu_counters_lock);
#endif
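Nothing changes for percpu_counter users; the raw lock is internal. A minimal usage sketch with a hypothetical counter:

    static struct percpu_counter nr_things;

    static int __init things_init(void)
    {
            return percpu_counter_init(&nr_things, 0);
    }

    static void thing_created(void)
    {
            percpu_counter_inc(&nr_things);        /* per-CPU fast path */
    }

    static s64 things_total(void)
    {
            return percpu_counter_sum(&nr_things); /* takes fbc->lock, now raw */
    }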
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
int prop_local_init_percpu(struct prop_local_percpu *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
percpu_counter_set(&pl->events, 0);
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
int prop_local_init_single(struct prop_local_single *pl)
{
- spin_lock_init(&pl->lock);
+ raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ raw_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
* For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
else
pl->events = 0;
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56d..c96d500577de 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
* in addition to the one that will be printed by
* the entity that is holding the lock already:
*/
- if (!spin_trylock_irqsave(&rs->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
return 0;
if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->missed++;
ret = 0;
}
- spin_unlock_irqrestore(&rs->lock, flags);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
return ret;
}
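___ratelimit() is normally reached through the __ratelimit() wrapper; the trylock above is what lets it be called even while rs->lock is already held by a recursing printk path. A usage sketch with hypothetical names:

    static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10); /* <= 10 msgs per 5s */

    static void my_report_error(int err)
    {
            if (__ratelimit(&my_rs))
                    pr_err("hypothetical failure: %d\n", err);
    }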
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b05..f2393c21fe85 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
int ret = 1;
unsigned long flags;
- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
ret = (sem->activity != 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
return ret;
}
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
struct task_struct *tsk;
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
goto out;
}
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the lock */
for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
}
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261f..410aa1189b13 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
+ raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock_irq(&sem->wait_lock);
+ raw_spin_lock_irq(&sem->wait_lock);
waiter.task = tsk;
waiter.flags = flags;
get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irq(&sem->wait_lock);
+ raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- spin_lock_irqsave(&sem->wait_lock, flags);
+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
- spin_unlock_irqrestore(&sem->wait_lock, flags);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return sem;
}
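As in the spinlock-based variant above, only the internal wait_lock changes type; the caller-facing API is untouched. A sketch with a hypothetical semaphore:

    static DECLARE_RWSEM(my_rwsem);

    static void my_read_path(void)
    {
            down_read(&my_rwsem);   /* wait_lock underneath is now raw */
            /* ... read shared state ... */
            up_read(&my_rwsem);
    }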
diff --git a/lib/sha1.c b/lib/sha1.c
index f33271dd00cb..1de509a159c8 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/cryptohash.h>
#include <asm/unaligned.h>
/*
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4689cb073da4..503f087382a4 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+ if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
goto out;
/*
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index e51e2558ca9d..a768e6d28bbb 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
* next filter in the chain. Apply the BCJ filter on the new data
* in the output buffer. If everything cannot be filtered, copy it
* to temp and rewind the output buffer position accordingly.
+ *
+ * This needs to be always run when temp.size == 0 to handle a special
+ * case where the output buffer is full and the next filter has no
+ * more output coming but hasn't returned XZ_STREAM_END yet.
*/
- if (s->temp.size < b->out_size - b->out_pos) {
+ if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
out_start = b->out_pos;
memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
s->temp.size = b->out_pos - out_start;
b->out_pos -= s->temp.size;
memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+ /*
+ * If there wasn't enough input to the next filter to fill
+ * the output buffer with unfiltered data, there's no point
+ * to try decoding more data to temp.
+ */
+ if (b->out_pos + s->temp.size < b->out_size)
+ return XZ_OK;
}
/*
- * If we have unfiltered data in temp, try to fill by decoding more
- * data from the next filter. Apply the BCJ filter on temp. Then we
- * hopefully can fill the actual output buffer by copying filtered
- * data from temp. A mix of filtered and unfiltered data may be left
- * in temp; it will be taken care on the next call to this function.
+ * We have unfiltered data in temp. If the output buffer isn't full
+ * yet, try to fill the temp buffer by decoding more data from the
+ * next filter. Apply the BCJ filter on temp. Then we hopefully can
+ * fill the actual output buffer by copying filtered data from temp.
+ * A mix of filtered and unfiltered data may be left in temp; it will
+ * be taken care on the next call to this function.
*/
- if (s->temp.size > 0) {
+ if (b->out_pos < b->out_size) {
/* Make b->out{,_pos,_size} temporarily point to s->temp. */
s->out = b->out;
s->out_pos = b->out_pos;