Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c          13
-rw-r--r--  kernel/bpf/arraymap.c      2
-rw-r--r--  kernel/bpf/btf.c          14
-rw-r--r--  kernel/events/core.c      10
-rw-r--r--  kernel/irq/manage.c        9
-rw-r--r--  kernel/locking/rtmutex.c  29
-rw-r--r--  kernel/sched/deadline.c    8
-rw-r--r--  kernel/sched/rt.c          2
-rw-r--r--  kernel/sched/topology.c    2
-rw-r--r--  kernel/softirq.c           2
-rw-r--r--  kernel/stop_machine.c     10
-rw-r--r--  kernel/time/tick-sched.c   2
12 files changed, 85 insertions, 18 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ceb1c4596c51..80d672a11088 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
break;
case AUDIT_KERN_MODULE:
audit_log_format(ab, "name=");
- audit_log_untrustedstring(ab, context->module.name);
- kfree(context->module.name);
+ if (context->module.name) {
+ audit_log_untrustedstring(ab, context->module.name);
+ kfree(context->module.name);
+ } else
+ audit_log_format(ab, "(null)");
+
break;
}
audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
{
struct audit_context *context = audit_context();
- context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- strcpy(context->module.name, name);
+ context->module.name = kstrdup(name, GFP_KERNEL);
+ if (!context->module.name)
+ audit_log_lost("out of memory in __audit_log_kern_module");
context->type = AUDIT_KERN_MODULE;
}
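[Annotation] The auditsc.c hunks replace an unchecked kmalloc()+strcpy() with kstrdup() and make the logging site tolerate a NULL name instead of dereferencing it. A minimal userspace sketch of that pattern follows, using strdup() and a "(null)" fallback; the struct and the function names (record_module_name, log_module_name) are invented for illustration.

/* Userspace sketch of the auditsc.c pattern: duplicate the string with a
 * checked allocation, and fall back to "(null)" when the copy failed
 * instead of dereferencing a NULL pointer. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct record {
	char *module_name;	/* may be NULL if strdup() failed */
};

static void record_module_name(struct record *rec, const char *name)
{
	rec->module_name = strdup(name);	/* checked later, like kstrdup() */
	if (!rec->module_name)
		fprintf(stderr, "out of memory while recording module name\n");
}

static void log_module_name(struct record *rec)
{
	if (rec->module_name) {
		printf("name=%s\n", rec->module_name);
		free(rec->module_name);
		rec->module_name = NULL;
	} else {
		printf("name=(null)\n");
	}
}

int main(void)
{
	struct record rec = { 0 };

	record_module_name(&rec, "ext4");
	log_module_name(&rec);		/* prints name=ext4 */
	log_module_name(&rec);		/* name already consumed: prints name=(null) */
	return 0;
}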
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 544e58f5f642..2aa55d030c77 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
return -EINVAL;
value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
- if (!value_type || value_size > map->value_size)
+ if (!value_type || value_size != map->value_size)
return -EINVAL;
return 0;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 9704934252b3..2590700237c1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1519,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
{
bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
const struct btf_member *member;
+ u32 meta_needed, last_offset;
struct btf *btf = env->btf;
u32 struct_size = t->size;
- u32 meta_needed;
u16 i;
meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1534,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, NULL);
+ last_offset = 0;
for_each_member(i, t, member) {
if (!btf_name_offset_valid(btf, member->name_off)) {
btf_verifier_log_member(env, t, member,
@@ -1555,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /*
+ * ">" instead of ">=" because the last member could be
+ * "char a[0];"
+ */
+ if (last_offset > member->offset) {
+ btf_verifier_log_member(env, t, member,
+ "Invalid member bits_offset");
+ return -EINVAL;
+ }
+
if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
btf_verifier_log_member(env, t, member,
"Memmber bits_offset exceeds its struct size");
@@ -1562,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
}
btf_verifier_log_member(env, t, member, NULL);
+ last_offset = member->offset;
}
return meta_needed;
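[Annotation] The btf.c change tracks the previous member's bit offset and rejects a struct whose members go backwards, using ">" rather than ">=" so that a trailing zero-sized member ("char a[0];") sharing its predecessor's offset is still accepted. A standalone sketch of that monotonicity check is below; the member array and field names are invented for illustration.

/* Userspace sketch of the "offsets must not decrease" check added to
 * btf_struct_check_meta(): '>' (not '>=') so a zero-sized trailing member
 * may share the offset of the member before it. */
#include <stdio.h>

struct member {
	const char *name;
	unsigned int bits_offset;
};

static int check_member_offsets(const struct member *m, unsigned int vlen,
				unsigned int struct_size_bits)
{
	unsigned int last_offset = 0;
	unsigned int i;

	for (i = 0; i < vlen; i++) {
		if (last_offset > m[i].bits_offset) {
			fprintf(stderr, "member %s: offset goes backwards\n",
				m[i].name);
			return -1;
		}
		if (m[i].bits_offset > struct_size_bits) {
			fprintf(stderr, "member %s: offset exceeds struct size\n",
				m[i].name);
			return -1;
		}
		last_offset = m[i].bits_offset;
	}
	return 0;
}

int main(void)
{
	struct member ok[]  = { { "a", 0 }, { "b", 32 }, { "tail", 64 } };
	struct member bad[] = { { "a", 32 }, { "b", 0 } };

	printf("ok:  %d\n", check_member_offsets(ok, 3, 64));	/* prints 0  */
	printf("bad: %d\n", check_member_offsets(bad, 2, 64));	/* prints -1 */
	return 0;
}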
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8f0434a9951a..eec2d5fb676b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)
static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
-static struct perf_callchain_entry *
+struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
- data->callchain = perf_callchain(event, regs);
+ if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
+ data->callchain = perf_callchain(event, regs);
+
size += data->callchain->nr;
header->size += size * sizeof(u64);
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
struct file *file, unsigned long offset,
unsigned long size)
{
+ /* d_inode(NULL) won't be equal to any mapped user-space file */
+ if (!filter->path.dentry)
+ return false;
+
if (d_inode(filter->path.dentry) != file_inode(file))
return false;
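[Annotation] The perf_prepare_sample() hunk skips the callchain walk when __PERF_SAMPLE_CALLCHAIN_EARLY says the callchain was already captured earlier in the sampling path, and the address-filter hunk refuses to dereference a NULL dentry. Below is a minimal sketch of the "compute only if not captured early" flag pattern; the flag and all names are invented for illustration.

/* Sketch of the compute-once guard in perf_prepare_sample(): if a field
 * was already filled in earlier (flagged by SAMPLE_EARLY), do not
 * recompute it. */
#include <stdio.h>

#define SAMPLE_EARLY	(1u << 0)	/* invented flag for illustration */

struct sample {
	unsigned int flags;
	int callchain_len;	/* pretend callchain: just a length */
};

static int expensive_callchain_walk(void)
{
	return 4;		/* stand-in for the real unwinder */
}

static void prepare_sample(struct sample *s)
{
	if (!(s->flags & SAMPLE_EARLY))
		s->callchain_len = expensive_callchain_walk();
	printf("callchain entries: %d\n", s->callchain_len);
}

int main(void)
{
	struct sample late  = { .flags = 0 };
	struct sample early = { .flags = SAMPLE_EARLY, .callchain_len = 7 };

	prepare_sample(&late);	/* walks: prints 4 */
	prepare_sample(&early);	/* keeps the early capture: prints 7 */
	return 0;
}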
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index daeabd791d58..9a8b7ba9aa88 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1068,6 +1068,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
return 0;
+ /*
+ * No further action required for interrupts which are requested as
+ * threaded interrupts already
+ */
+ if (new->handler == irq_default_primary_handler)
+ return 0;
+
new->flags |= IRQF_ONESHOT;
/*
@@ -1075,7 +1082,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
* thread handler. We force thread them as well by creating a
* secondary action.
*/
- if (new->handler != irq_default_primary_handler && new->thread_fn) {
+ if (new->handler && new->thread_fn) {
/* Allocate the secondary action */
new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!new->secondary)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f014be7a4b8..2823d4163a37 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
rt_mutex_postunlock(&wake_q);
}
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+ might_sleep();
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+ __rt_mutex_lock(lock, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+#endif
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
* rt_mutex_lock - lock a rt_mutex
*
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
- might_sleep();
-
- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+ __rt_mutex_lock(lock, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
/**
* rt_mutex_lock_interruptible - lock a rt_mutex interruptible
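[Annotation] The rtmutex.c change factors the common body into __rt_mutex_lock() and then builds exactly one public entry point: rt_mutex_lock_nested() when CONFIG_DEBUG_LOCK_ALLOC (lockdep) is enabled, plain rt_mutex_lock() otherwise. A userspace sketch of that "one inline helper, #ifdef-selected wrappers" shape follows, with a pthread mutex standing in for the rt_mutex and a printf standing in for the lockdep annotation; all names other than pthread_* are invented.

/* Sketch of the structure introduced in rtmutex.c: a single inline helper
 * holds the real work, and the preprocessor selects which public wrapper
 * (plain or _nested) is built. Compile with -pthread, and add
 * -DDEBUG_LOCK_ALLOC to get the _nested variant. */
#include <pthread.h>
#include <stdio.h>

static inline void __my_lock(pthread_mutex_t *lock, unsigned int subclass)
{
	/* stand-in for the lockdep annotation done via mutex_acquire() */
	printf("acquiring with subclass %u\n", subclass);
	pthread_mutex_lock(lock);
}

#ifdef DEBUG_LOCK_ALLOC
void my_lock_nested(pthread_mutex_t *lock, unsigned int subclass)
{
	__my_lock(lock, subclass);
}
#else
void my_lock(pthread_mutex_t *lock)
{
	__my_lock(lock, 0);
}
#endif

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

#ifdef DEBUG_LOCK_ALLOC
	my_lock_nested(&m, 1);
#else
	my_lock(&m);
#endif
	pthread_mutex_unlock(&m);
	return 0;
}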
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10c7b51c0d1f..b5fbdde6afa9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2090,8 +2090,14 @@ retry:
sub_rq_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
add_rq_bw(&next_task->dl, &later_rq->dl);
+
+ /*
+ * Update the later_rq clock here, because the clock is used
+ * by the cpufreq_update_util() inside __add_running_bw().
+ */
+ update_rq_clock(later_rq);
add_running_bw(&next_task->dl, &later_rq->dl);
- activate_task(later_rq, next_task, 0);
+ activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
ret = 1;
resched_curr(later_rq);
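[Annotation] The deadline.c hunk refreshes later_rq's clock by hand because, per the added comment, cpufreq_update_util() inside __add_running_bw() reads it, and then passes ENQUEUE_NOCLOCK so activate_task() does not refresh it a second time. A small sketch of that "refresh once, tell the callee to skip its own refresh" pattern follows; the flag and all names are invented for illustration.

/* Sketch of the ENQUEUE_NOCLOCK idea: the caller refreshes the clock once,
 * uses it, and tells the enqueue path not to refresh it again. */
#include <stdio.h>
#include <time.h>

#define ENQ_NOCLOCK	(1u << 0)	/* invented flag for illustration */

struct runqueue {
	struct timespec clock;
};

static void update_clock(struct runqueue *rq)
{
	clock_gettime(CLOCK_MONOTONIC, &rq->clock);
	printf("clock refreshed: %ld.%09ld\n",
	       (long)rq->clock.tv_sec, rq->clock.tv_nsec);
}

static void account_bandwidth(struct runqueue *rq)
{
	/* consumer of the clock, like cpufreq_update_util() */
	printf("accounting against clock %ld.%09ld\n",
	       (long)rq->clock.tv_sec, rq->clock.tv_nsec);
}

static void enqueue_task(struct runqueue *rq, unsigned int flags)
{
	if (!(flags & ENQ_NOCLOCK))
		update_clock(rq);	/* skipped when the caller already did it */
	printf("task enqueued\n");
}

int main(void)
{
	struct runqueue rq = { 0 };

	update_clock(&rq);		/* refresh once ...        */
	account_bandwidth(&rq);		/* ... use it ...          */
	enqueue_task(&rq, ENQ_NOCLOCK);	/* ... skip second refresh */
	return 0;
}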
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 572567078b60..eaaec8364f96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
+ if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+ rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a831427bc7..56a0fed30c0a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
}
- if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+ if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 75ffc1d1a2e0..6f584861d329 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -390,7 +390,7 @@ static inline void tick_irq_exit(void)
/* Make sure that timer wheel updates are propagated */
if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
- if (!in_interrupt())
+ if (!in_irq())
tick_nohz_irq_exit();
}
#endif
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1ff523dae6e2..e190d1ef3a23 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -260,6 +260,15 @@ retry:
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
+ /*
+ * The waking up of stopper threads has to happen
+ * in the same scheduling context as the queueing.
+ * Otherwise, there is a possibility of one of the
+ * above stoppers being woken up by another CPU,
+ * and preempting us. This will cause us to not
+ * wake up the other stopper forever.
+ */
+ preempt_disable();
unlock:
raw_spin_unlock(&stopper2->lock);
raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
}
if (!err) {
- preempt_disable();
wake_up_q(&wakeq);
preempt_enable();
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index da9455a6b42b..5b33e2f5c0ed 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
static inline bool local_timer_softirq_pending(void)
{
- return local_softirq_pending() & TIMER_SOFTIRQ;
+ return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
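[Annotation] The tick-sched.c fix is the classic "enum index used as a bit mask" bug: TIMER_SOFTIRQ is the softirq's number (1 in the kernel's enum, with HI_SOFTIRQ at 0), so pending & TIMER_SOFTIRQ really tests HI_SOFTIRQ's bit, while BIT(TIMER_SOFTIRQ) builds the intended mask. The softirq numbers below follow the kernel's enum; everything else is a standalone demonstration.

/* Standalone demo of the bug fixed in local_timer_softirq_pending():
 * masking with the enum value tests the wrong bit; masking with
 * BIT(enum value) tests the right one. */
#include <stdio.h>

#define BIT(nr)	(1u << (nr))

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, NET_TX_SOFTIRQ = 2 };

int main(void)
{
	unsigned int pending = BIT(TIMER_SOFTIRQ);	/* only the timer softirq raised */

	/* buggy check: TIMER_SOFTIRQ == 1, so this really tests HI_SOFTIRQ's bit */
	printf("pending & TIMER_SOFTIRQ      -> %u (misses the timer softirq)\n",
	       pending & TIMER_SOFTIRQ);

	/* fixed check: build the mask from the softirq number */
	printf("pending & BIT(TIMER_SOFTIRQ) -> %u (sees it)\n",
	       pending & BIT(TIMER_SOFTIRQ));
	return 0;
}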