Diffstat (limited to 'kernel')
53 files changed, 247 insertions, 363 deletions
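The largest block of removals below is in kernel/cpu.c, which deletes the legacy CPU notifier chain (register_cpu_notifier() and friends) now that callers are expected to target the hotplug state machine instead. A minimal sketch of the replacement pattern; the "subsys:online" string and the my_cpu_online()/my_cpu_offline() callbacks are hypothetical stand-ins, not taken from this diff:

#include <linux/cpuhotplug.h>

static enum cpuhp_state my_state;

/* Hypothetical per-CPU callbacks; a real subsystem supplies its own. */
static int my_cpu_online(unsigned int cpu)
{
	/* set up per-CPU resources for @cpu */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* tear down per-CPU resources for @cpu */
	return 0;
}

static int __init my_subsys_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN requests a free dynamic slot; on success the
	 * reserved state is returned, which is exactly what the reworked
	 * cpuhp_store_callbacks()/cpuhp_reserve_state() below arrange for.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_state = ret;
	return 0;
}

Teardown is symmetric via cpuhp_remove_state(my_state).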
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 8b1dde96a0fa..7b44195da81b 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -231,9 +231,11 @@ static void untag_chunk(struct node *p) if (size) new = alloc_chunk(size); + mutex_lock(&entry->group->mark_mutex); spin_lock(&entry->lock); if (chunk->dead || !entry->inode) { spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); if (new) free_chunk(new); goto out; @@ -251,6 +253,7 @@ static void untag_chunk(struct node *p) list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); goto out; } @@ -258,8 +261,8 @@ static void untag_chunk(struct node *p) if (!new) goto Fallback; - fsnotify_duplicate_mark(&new->mark, entry); - if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) { + if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode, + NULL, 1)) { fsnotify_put_mark(&new->mark); goto Fallback; } @@ -293,6 +296,7 @@ static void untag_chunk(struct node *p) owner->root = new; spin_unlock(&hash_lock); spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(&new->mark); /* drop initial reference */ goto out; @@ -309,6 +313,7 @@ Fallback: put_tree(owner); spin_unlock(&hash_lock); spin_unlock(&entry->lock); + mutex_unlock(&entry->group->mark_mutex); out: fsnotify_put_mark(entry); spin_lock(&hash_lock); @@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk_entry = &chunk->mark; + mutex_lock(&old_entry->group->mark_mutex); spin_lock(&old_entry->lock); if (!old_entry->inode) { /* old_entry is being shot, lets just lie */ spin_unlock(&old_entry->lock); + mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(old_entry); free_chunk(chunk); return -ENOENT; } - fsnotify_duplicate_mark(chunk_entry, old_entry); - if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) { + if (fsnotify_add_mark_locked(chunk_entry, old_entry->group, + old_entry->inode, NULL, 1)) { spin_unlock(&old_entry->lock); + mutex_unlock(&old_entry->group->mark_mutex); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; @@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) chunk->dead = 1; spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); + mutex_unlock(&old_entry->group->mark_mutex); fsnotify_destroy_mark(chunk_entry, audit_tree_group); @@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); + mutex_unlock(&old_entry->group->mark_mutex); fsnotify_destroy_mark(old_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ diff --git a/kernel/capability.c b/kernel/capability.c index 4984e1f552eb..a98e814f216f 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -17,7 +17,7 @@ #include <linux/syscalls.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /* * Leveraged for setting/resetting capabilities diff --git a/kernel/compat.c b/kernel/compat.c index b3a047f208a7..19aec5d98108 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -28,7 +28,7 @@ #include <linux/ptrace.h> #include 
<linux/gfp.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp) { diff --git a/kernel/configs.c b/kernel/configs.c index c18b1f1ae515..2df132b20217 100644 --- a/kernel/configs.c +++ b/kernel/configs.c @@ -28,7 +28,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /**************************************************/ /* the actual current config file */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 5339aca811d2..f75c4d031eeb 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -183,23 +183,16 @@ EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen); /* * The following two APIs (cpu_maps_update_begin/done) must be used when * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. - * The APIs cpu_notifier_register_begin/done() must be used to protect CPU - * hotplug callback (un)registration performed using __register_cpu_notifier() - * or __unregister_cpu_notifier(). */ void cpu_maps_update_begin(void) { mutex_lock(&cpu_add_remove_lock); } -EXPORT_SYMBOL(cpu_notifier_register_begin); void cpu_maps_update_done(void) { mutex_unlock(&cpu_add_remove_lock); } -EXPORT_SYMBOL(cpu_notifier_register_done); - -static RAW_NOTIFIER_HEAD(cpu_chain); /* If set, cpu_up and cpu_down will return -EBUSY and do nothing. * Should always be manipulated under cpu_add_remove_lock @@ -349,66 +342,7 @@ void cpu_hotplug_enable(void) EXPORT_SYMBOL_GPL(cpu_hotplug_enable); #endif /* CONFIG_HOTPLUG_CPU */ -/* Need to know about CPUs going up/down? */ -int register_cpu_notifier(struct notifier_block *nb) -{ - int ret; - cpu_maps_update_begin(); - ret = raw_notifier_chain_register(&cpu_chain, nb); - cpu_maps_update_done(); - return ret; -} - -int __register_cpu_notifier(struct notifier_block *nb) -{ - return raw_notifier_chain_register(&cpu_chain, nb); -} - -static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call, - int *nr_calls) -{ - unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0; - void *hcpu = (void *)(long)cpu; - - int ret; - - ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call, - nr_calls); - - return notifier_to_errno(ret); -} - -static int cpu_notify(unsigned long val, unsigned int cpu) -{ - return __cpu_notify(val, cpu, -1, NULL); -} - -static void cpu_notify_nofail(unsigned long val, unsigned int cpu) -{ - BUG_ON(cpu_notify(val, cpu)); -} - /* Notifier wrappers for transitioning to state machine */ -static int notify_prepare(unsigned int cpu) -{ - int nr_calls = 0; - int ret; - - ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls); - if (ret) { - nr_calls--; - printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n", - __func__, cpu); - __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL); - } - return ret; -} - -static int notify_online(unsigned int cpu) -{ - cpu_notify(CPU_ONLINE, cpu); - return 0; -} static int bringup_wait_for_ap(unsigned int cpu) { @@ -433,10 +367,8 @@ static int bringup_cpu(unsigned int cpu) /* Arch-specific enabling code. */ ret = __cpu_up(cpu, idle); irq_unlock_sparse(); - if (ret) { - cpu_notify(CPU_UP_CANCELED, cpu); + if (ret) return ret; - } ret = bringup_wait_for_ap(cpu); BUG_ON(!cpu_online(cpu)); return ret; @@ -565,11 +497,6 @@ static void cpuhp_thread_fun(unsigned int cpu) BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); undo_cpu_down(cpu, st); - /* - * This is a momentary workaround to keep the notifier users - * happy. 
Will go away once we got rid of the notifiers. - */ - cpu_notify_nofail(CPU_DOWN_FAILED, cpu); st->rollback = false; } else { /* Cannot happen .... */ @@ -659,22 +586,6 @@ void __init cpuhp_threads_init(void) kthread_unpark(this_cpu_read(cpuhp_state.thread)); } -EXPORT_SYMBOL(register_cpu_notifier); -EXPORT_SYMBOL(__register_cpu_notifier); -void unregister_cpu_notifier(struct notifier_block *nb) -{ - cpu_maps_update_begin(); - raw_notifier_chain_unregister(&cpu_chain, nb); - cpu_maps_update_done(); -} -EXPORT_SYMBOL(unregister_cpu_notifier); - -void __unregister_cpu_notifier(struct notifier_block *nb) -{ - raw_notifier_chain_unregister(&cpu_chain, nb); -} -EXPORT_SYMBOL(__unregister_cpu_notifier); - #ifdef CONFIG_HOTPLUG_CPU /** * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU @@ -741,20 +652,6 @@ static inline void check_for_tasks(int dead_cpu) read_unlock(&tasklist_lock); } -static int notify_down_prepare(unsigned int cpu) -{ - int err, nr_calls = 0; - - err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls); - if (err) { - nr_calls--; - __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL); - pr_warn("%s: attempt to take down CPU %u failed\n", - __func__, cpu); - } - return err; -} - /* Take this CPU down. */ static int take_cpu_down(void *_param) { @@ -833,13 +730,6 @@ static int takedown_cpu(unsigned int cpu) return 0; } -static int notify_dead(unsigned int cpu) -{ - cpu_notify_nofail(CPU_DEAD, cpu); - check_for_tasks(cpu); - return 0; -} - static void cpuhp_complete_idle_dead(void *arg) { struct cpuhp_cpu_state *st = arg; @@ -863,9 +753,7 @@ void cpuhp_report_idle_dead(void) } #else -#define notify_down_prepare NULL #define takedown_cpu NULL -#define notify_dead NULL #endif #ifdef CONFIG_HOTPLUG_CPU @@ -924,9 +812,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; out: cpu_hotplug_done(); - /* This post dead nonsense must die */ - if (!ret && hasdied) - cpu_notify_nofail(CPU_POST_DEAD, cpu); return ret; } @@ -1292,17 +1177,6 @@ static struct cpuhp_step cpuhp_bp_states[] = { .teardown.single = rcutree_dead_cpu, }, /* - * Preparatory and dead notifiers. Will be replaced once the notifiers - * are converted to states. - */ - [CPUHP_NOTIFY_PREPARE] = { - .name = "notify:prepare", - .startup.single = notify_prepare, - .teardown.single = notify_dead, - .skip_onerr = true, - .cant_stop = true, - }, - /* * On the tear-down path, timers_dead_cpu() must be invoked * before blk_mq_queue_reinit_notify() from notify_dead(), * otherwise a RCU stall occurs. @@ -1391,17 +1265,6 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup.single = rcutree_online_cpu, .teardown.single = rcutree_offline_cpu, }, - - /* - * Online/down_prepare notifiers. Will be removed once the notifiers - * are converted to states. - */ - [CPUHP_AP_NOTIFY_ONLINE] = { - .name = "notify:online", - .startup.single = notify_online, - .teardown.single = notify_down_prepare, - .skip_onerr = true, - }, #endif /* * The dynamically registered state space is here @@ -1432,23 +1295,53 @@ static int cpuhp_cb_check(enum cpuhp_state state) return 0; } -static void cpuhp_store_callbacks(enum cpuhp_state state, - const char *name, - int (*startup)(unsigned int cpu), - int (*teardown)(unsigned int cpu), - bool multi_instance) +/* + * Returns a free for dynamic slot assignment of the Online state. The states + * are protected by the cpuhp_slot_states mutex and an empty slot is identified + * by having no name assigned. 
+ */ +static int cpuhp_reserve_state(enum cpuhp_state state) +{ + enum cpuhp_state i; + + for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) { + if (!cpuhp_ap_states[i].name) + return i; + } + WARN(1, "No more dynamic states available for CPU hotplug\n"); + return -ENOSPC; +} + +static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu), + bool multi_instance) { /* (Un)Install the callbacks for further cpu hotplug operations */ struct cpuhp_step *sp; + int ret = 0; mutex_lock(&cpuhp_state_mutex); + + if (state == CPUHP_AP_ONLINE_DYN) { + ret = cpuhp_reserve_state(state); + if (ret < 0) + goto out; + state = ret; + } sp = cpuhp_get_step(state); + if (name && sp->name) { + ret = -EBUSY; + goto out; + } sp->startup.single = startup; sp->teardown.single = teardown; sp->name = name; sp->multi_instance = multi_instance; INIT_HLIST_HEAD(&sp->list); +out: mutex_unlock(&cpuhp_state_mutex); + return ret; } static void *cpuhp_get_teardown_cb(enum cpuhp_state state) @@ -1509,29 +1402,6 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, } } -/* - * Returns a free for dynamic slot assignment of the Online state. The states - * are protected by the cpuhp_slot_states mutex and an empty slot is identified - * by having no name assigned. - */ -static int cpuhp_reserve_state(enum cpuhp_state state) -{ - enum cpuhp_state i; - - mutex_lock(&cpuhp_state_mutex); - for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) { - if (cpuhp_ap_states[i].name) - continue; - - cpuhp_ap_states[i].name = "Reserved"; - mutex_unlock(&cpuhp_state_mutex); - return i; - } - mutex_unlock(&cpuhp_state_mutex); - WARN(1, "No more dynamic states available for CPU hotplug\n"); - return -ENOSPC; -} - int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke) { @@ -1580,11 +1450,13 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance); /** * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state - * @state: The state to setup - * @invoke: If true, the startup function is invoked for cpus where - * cpu state >= @state - * @startup: startup callback function - * @teardown: teardown callback function + * @state: The state to setup + * @invoke: If true, the startup function is invoked for cpus where + * cpu state >= @state + * @startup: startup callback function + * @teardown: teardown callback function + * @multi_instance: State is set up for multiple instances which get + * added afterwards. 
* * Returns: * On success: @@ -1599,25 +1471,23 @@ int __cpuhp_setup_state(enum cpuhp_state state, bool multi_instance) { int cpu, ret = 0; - int dyn_state = 0; + bool dynstate; if (cpuhp_cb_check(state) || !name) return -EINVAL; get_online_cpus(); - /* currently assignments for the ONLINE state are possible */ - if (state == CPUHP_AP_ONLINE_DYN) { - dyn_state = 1; - ret = cpuhp_reserve_state(state); - if (ret < 0) - goto out; + ret = cpuhp_store_callbacks(state, name, startup, teardown, + multi_instance); + + dynstate = state == CPUHP_AP_ONLINE_DYN; + if (ret > 0 && dynstate) { state = ret; + ret = 0; } - cpuhp_store_callbacks(state, name, startup, teardown, multi_instance); - - if (!invoke || !startup) + if (ret || !invoke || !startup) goto out; /* @@ -1641,7 +1511,11 @@ int __cpuhp_setup_state(enum cpuhp_state state, } out: put_online_cpus(); - if (!ret && dyn_state) + /* + * If the requested state is CPUHP_AP_ONLINE_DYN, return the + * dynamically allocated state in case of success. + */ + if (!ret && dynstate) return state; return ret; } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 29f815d2ef7e..b3088886cd37 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -55,7 +55,7 @@ #include <linux/backing-dev.h> #include <linux/sort.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/mutex.h> #include <linux/cgroup.h> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 215871bda3a2..d416f3baf392 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1194,7 +1194,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) /* Reserve the 1st slot for get_trampoline_vaddr() */ set_bit(0, area->bitmap); atomic_set(&area->slot_count, 1); - copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); + arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); if (!xol_add_vma(mm, area)) return area; diff --git a/kernel/exit.c b/kernel/exit.c index aacff8e2aec0..8f14b866f9f6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -56,7 +56,7 @@ #include <linux/kcov.h> #include <linux/random.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/unistd.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> diff --git a/kernel/extable.c b/kernel/extable.c index e820ccee9846..e3beec4a2339 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -22,7 +22,7 @@ #include <linux/init.h> #include <asm/sections.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /* * mutex protecting text section modification (dynamic code patching). 
diff --git a/kernel/fork.c b/kernel/fork.c index 869b8ccc00bf..11c5c8ab827c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -79,7 +79,7 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> diff --git a/kernel/futex.c b/kernel/futex.c index 9246d9f593d1..0842c8ca534b 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2459,7 +2459,7 @@ retry: restart->fn = futex_wait_restart; restart->futex.uaddr = uaddr; restart->futex.val = val; - restart->futex.time = abs_time->tv64; + restart->futex.time = *abs_time; restart->futex.bitset = bitset; restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; @@ -2480,7 +2480,7 @@ static long futex_wait_restart(struct restart_block *restart) ktime_t t, *tp = NULL; if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { - t.tv64 = restart->futex.time; + t = restart->futex.time; tp = &t; } restart->fn = do_no_restart_syscall; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 4ae3232e7a28..3f409968e466 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -13,7 +13,7 @@ #include <linux/ptrace.h> #include <linux/syscalls.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /* diff --git a/kernel/groups.c b/kernel/groups.c index 2fcadd66a8fd..8dd7a61b7115 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -8,7 +8,7 @@ #include <linux/syscalls.h> #include <linux/user_namespace.h> #include <linux/vmalloc.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> struct group_info *groups_alloc(int gidsetsize) { diff --git a/kernel/kmod.c b/kernel/kmod.c index 0277d1216f80..d45c96073afb 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -39,7 +39,7 @@ #include <linux/rwsem.h> #include <linux/ptrace.h> #include <linux/async.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <trace/events/module.h> diff --git a/kernel/kprobes.c b/kernel/kprobes.c index d63095472ea9..43460104f119 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -52,7 +52,7 @@ #include <asm/sections.h> #include <asm/cacheflush.h> #include <asm/errno.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index a0f61effad25..6d1fcc786081 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -18,7 +18,7 @@ #include <linux/debug_locks.h> #include <linux/vmalloc.h> #include <linux/sort.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/div64.h> #include "lockdep_internals.h" diff --git a/kernel/module.c b/kernel/module.c index f7482db0f843..5088784c0cf9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -46,7 +46,7 @@ #include <linux/string.h> #include <linux/mutex.h> #include <linux/rculist.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <linux/license.h> diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 4f0f0604f1c4..2d8e2b227db8 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -30,7 +30,7 @@ #include <linux/compiler.h> #include <linux/ktime.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> diff --git a/kernel/power/user.c b/kernel/power/user.c index 35310b627388..22df9f7ff672 100644 --- a/kernel/power/user.c +++ 
b/kernel/power/user.c @@ -25,7 +25,7 @@ #include <linux/cpu.h> #include <linux/freezer.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "power.h" diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index e2cdd87e7a63..8b2696420abb 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -46,7 +46,7 @@ #include <linux/ctype.h> #include <linux/uio.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/sections.h> #define CREATE_TRACE_POINTS diff --git a/kernel/profile.c b/kernel/profile.c index 2dbccf2d806c..f67ce0aa6bc4 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -408,7 +408,7 @@ void profile_tick(int type) #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> static int prof_cpu_mask_proc_show(struct seq_file *m, void *v) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 966556ebdbb3..c56fb57f2991 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1456,7 +1456,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * yield - it could be a while. */ if (unlikely(queued)) { - ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); + ktime_t to = NSEC_PER_SEC / HZ; set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&to, HRTIMER_MODE_REL); diff --git a/kernel/signal.c b/kernel/signal.c index ae60996fedff..ff046b73ff2d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -39,7 +39,7 @@ #include <trace/events/signal.h> #include <asm/param.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/unistd.h> #include <asm/siginfo.h> #include <asm/cacheflush.h> @@ -587,7 +587,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) struct hrtimer *tmr = &tsk->signal->real_timer; if (!hrtimer_is_queued(tmr) && - tsk->signal->it_real_incr.tv64 != 0) { + tsk->signal->it_real_incr != 0) { hrtimer_forward(tmr, tmr->base->get_time(), tsk->signal->it_real_incr); hrtimer_restart(tmr); @@ -2766,7 +2766,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) int do_sigtimedwait(const sigset_t *which, siginfo_t *info, const struct timespec *ts) { - ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX }; + ktime_t *to = NULL, timeout = KTIME_MAX; struct task_struct *tsk = current; sigset_t mask = *which; int sig, ret = 0; @@ -2786,7 +2786,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info, spin_lock_irq(&tsk->sighand->siglock); sig = dequeue_signal(tsk, &mask, info); - if (!sig && timeout.tv64) { + if (!sig && timeout) { /* * None ready, temporarily unblock those we're interested * while we are sleeping in so that we'll be awakened when diff --git a/kernel/sys.c b/kernel/sys.c index 9758892a2d09..842914ef7de4 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -57,7 +57,7 @@ /* Move somewhere else to avoid recompiling? 
*/ #include <generated/utsrelease.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/io.h> #include <asm/unistd.h> diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 635482e60ca3..8acef8576ce9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -150,6 +150,9 @@ cond_syscall(sys_io_destroy); cond_syscall(sys_io_submit); cond_syscall(sys_io_cancel); cond_syscall(sys_io_getevents); +cond_syscall(compat_sys_io_setup); +cond_syscall(compat_sys_io_submit); +cond_syscall(compat_sys_io_getevents); cond_syscall(sys_sysfs); cond_syscall(sys_syslog); cond_syscall(sys_process_vm_readv); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1a292ebcbbb6..8dbaec0e4f7f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -67,7 +67,7 @@ #include <linux/bpf.h> #include <linux/mount.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/processor.h> #ifdef CONFIG_X86 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 3921cf7fea8e..e6dc9a538efa 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -234,7 +234,7 @@ static int alarmtimer_suspend(struct device *dev) min = freezer_delta; expires = freezer_expires; type = freezer_alarmtype; - freezer_delta = ktime_set(0, 0); + freezer_delta = 0; spin_unlock_irqrestore(&freezer_delta_lock, flags); rtc = alarmtimer_get_rtcdev(); @@ -254,13 +254,13 @@ static int alarmtimer_suspend(struct device *dev) if (!next) continue; delta = ktime_sub(next->expires, base->gettime()); - if (!min.tv64 || (delta.tv64 < min.tv64)) { + if (!min || (delta < min)) { expires = next->expires; min = delta; type = i; } } - if (min.tv64 == 0) + if (min == 0) return 0; if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) { @@ -277,7 +277,7 @@ static int alarmtimer_suspend(struct device *dev) now = ktime_add(now, min); /* Set alarm, if in the past reject suspend briefly to handle */ - ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); + ret = rtc_timer_start(rtc, &rtctimer, now, 0); if (ret < 0) __pm_wakeup_event(ws, MSEC_PER_SEC); return ret; @@ -328,7 +328,7 @@ static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type) delta = ktime_sub(absexp, base->gettime()); spin_lock_irqsave(&freezer_delta_lock, flags); - if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) { + if (!freezer_delta || (delta < freezer_delta)) { freezer_delta = delta; freezer_expires = absexp; freezer_alarmtype = type; @@ -453,10 +453,10 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) delta = ktime_sub(now, alarm->node.expires); - if (delta.tv64 < 0) + if (delta < 0) return 0; - if (unlikely(delta.tv64 >= interval.tv64)) { + if (unlikely(delta >= interval)) { s64 incr = ktime_to_ns(interval); overrun = ktime_divns(delta, incr); @@ -464,7 +464,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) alarm->node.expires = ktime_add_ns(alarm->node.expires, incr*overrun); - if (alarm->node.expires.tv64 > now.tv64) + if (alarm->node.expires > now) return overrun; /* * This (and the ktime_add() below) is the @@ -522,7 +522,7 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, } /* Re-add periodic timers */ - if (ptr->it.alarm.interval.tv64) { + if (ptr->it.alarm.interval) { ptr->it_overrun += alarm_forward(alarm, now, ptr->it.alarm.interval); result = ALARMTIMER_RESTART; @@ -730,7 +730,7 @@ static int update_rmtp(ktime_t exp, enum alarmtimer_type type, rem = ktime_sub(exp, alarm_bases[type].gettime()); - if (rem.tv64 <= 0) + if (rem <= 0) return 0; rmt = 
ktime_to_timespec(rem); @@ -755,7 +755,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart) struct alarm alarm; int ret = 0; - exp.tv64 = restart->nanosleep.expires; + exp = restart->nanosleep.expires; alarm_init(&alarm, type, alarmtimer_nsleep_wakeup); if (alarmtimer_do_nsleep(&alarm, exp)) @@ -835,7 +835,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, restart = ¤t->restart_block; restart->fn = alarm_timer_nsleep_restart; restart->nanosleep.clockid = type; - restart->nanosleep.expires = exp.tv64; + restart->nanosleep.expires = exp; restart->nanosleep.rmtp = rmtp; ret = -ERESTART_RESTARTBLOCK; diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 2c5bc77c0bb0..97ac0951f164 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -179,7 +179,7 @@ void clockevents_switch_state(struct clock_event_device *dev, void clockevents_shutdown(struct clock_event_device *dev) { clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); - dev->next_event.tv64 = KTIME_MAX; + dev->next_event = KTIME_MAX; } /** @@ -213,7 +213,7 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev) if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { printk_deferred(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); - dev->next_event.tv64 = KTIME_MAX; + dev->next_event = KTIME_MAX; return -ETIME; } @@ -310,7 +310,7 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, int64_t delta; int rc; - if (unlikely(expires.tv64 < 0)) { + if (unlikely(expires < 0)) { WARN_ON_ONCE(1); return -ETIME; } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 150242ccfcd2..665985b0a89a 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -170,7 +170,7 @@ void clocksource_mark_unstable(struct clocksource *cs) static void clocksource_watchdog(unsigned long data) { struct clocksource *cs; - cycle_t csnow, wdnow, cslast, wdlast, delta; + u64 csnow, wdnow, cslast, wdlast, delta; int64_t wd_nsec, cs_nsec; int next_cpu, reset_pending; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 08be5c99d26b..c6ecedd3b839 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -50,7 +50,7 @@ #include <linux/timer.h> #include <linux/freezer.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <trace/events/timer.h> @@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) return 0; expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); - return expires.tv64 <= new_base->cpu_base->expires_next.tv64; + return expires <= new_base->cpu_base->expires_next; #else return 0; #endif @@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) * We use KTIME_SEC_MAX here, the maximum timeout which we can * return to user space in a timespec: */ - if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) + if (res < 0 || res < lhs || res < rhs) res = ktime_set(KTIME_SEC_MAX, 0); return res; @@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base, static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) { struct hrtimer_clock_base *base = cpu_base->clock_base; - ktime_t expires, expires_next = { .tv64 = KTIME_MAX }; unsigned int active = cpu_base->active_bases; + ktime_t expires, expires_next = KTIME_MAX; hrtimer_update_next_timer(cpu_base, NULL); for (; active; base++, active >>= 1) { @@ -479,7 +479,7 @@ static ktime_t 
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) next = timerqueue_getnext(&base->active); timer = container_of(next, struct hrtimer, node); expires = ktime_sub(hrtimer_get_expires(timer), base->offset); - if (expires.tv64 < expires_next.tv64) { + if (expires < expires_next) { expires_next = expires; hrtimer_update_next_timer(cpu_base, timer); } @@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) * the clock bases so the result might be negative. Fix it up * to prevent a false positive in clockevents_program_event(). */ - if (expires_next.tv64 < 0) - expires_next.tv64 = 0; + if (expires_next < 0) + expires_next = 0; return expires_next; } #endif @@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) expires_next = __hrtimer_get_next_event(cpu_base); - if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) + if (skip_equal && expires_next == cpu_base->expires_next) return; - cpu_base->expires_next.tv64 = expires_next.tv64; + cpu_base->expires_next = expires_next; /* * If a hang was detected in the last timer interrupt then we @@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer, * CLOCK_REALTIME timer might be requested with an absolute * expiry time which is less than base->offset. Set it to 0. */ - if (expires.tv64 < 0) - expires.tv64 = 0; + if (expires < 0) + expires = 0; - if (expires.tv64 >= cpu_base->expires_next.tv64) + if (expires >= cpu_base->expires_next) return; /* Update the pointer to the next expiring timer */ @@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer, */ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { - base->expires_next.tv64 = KTIME_MAX; + base->expires_next = KTIME_MAX; base->hres_active = 0; } @@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) delta = ktime_sub(now, hrtimer_get_expires(timer)); - if (delta.tv64 < 0) + if (delta < 0) return 0; if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED)) return 0; - if (interval.tv64 < hrtimer_resolution) - interval.tv64 = hrtimer_resolution; + if (interval < hrtimer_resolution) + interval = hrtimer_resolution; - if (unlikely(delta.tv64 >= interval.tv64)) { + if (unlikely(delta >= interval)) { s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); hrtimer_add_expires_ns(timer, incr * orun); - if (hrtimer_get_expires_tv64(timer) > now.tv64) + if (hrtimer_get_expires_tv64(timer) > now) return orun; /* * This (and the ktime_add() below) is the @@ -955,7 +955,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim, */ timer->is_rel = mode & HRTIMER_MODE_REL; if (timer->is_rel) - tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution)); + tim = ktime_add_safe(tim, hrtimer_resolution); #endif return tim; } @@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void) raw_spin_lock_irqsave(&cpu_base->lock, flags); if (!__hrtimer_hres_active(cpu_base)) - expires = __hrtimer_get_next_event(cpu_base).tv64; + expires = __hrtimer_get_next_event(cpu_base); raw_spin_unlock_irqrestore(&cpu_base->lock, flags); @@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) * are right-of a not yet expired timer, because that * timer will have to trigger a wakeup anyway. 
*/ - if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) + if (basenow < hrtimer_get_softexpires_tv64(timer)) break; __run_hrtimer(cpu_base, base, timer, &basenow); @@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; - dev->next_event.tv64 = KTIME_MAX; + dev->next_event = KTIME_MAX; raw_spin_lock(&cpu_base->lock); entry_time = now = hrtimer_update_base(cpu_base); @@ -1331,7 +1331,7 @@ retry: * timers which run their callback and need to be requeued on * this CPU. */ - cpu_base->expires_next.tv64 = KTIME_MAX; + cpu_base->expires_next = KTIME_MAX; __hrtimer_run_queues(cpu_base, now); @@ -1379,13 +1379,13 @@ retry: cpu_base->hang_detected = 1; raw_spin_unlock(&cpu_base->lock); delta = ktime_sub(now, entry_time); - if ((unsigned int)delta.tv64 > cpu_base->max_hang_time) - cpu_base->max_hang_time = (unsigned int) delta.tv64; + if ((unsigned int)delta > cpu_base->max_hang_time) + cpu_base->max_hang_time = (unsigned int) delta; /* * Limit it to a sensible value as we enforce a longer * delay. Give the CPU at least 100ms to catch up. */ - if (delta.tv64 > 100 * NSEC_PER_MSEC) + if (delta > 100 * NSEC_PER_MSEC) expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); else expires_next = ktime_add(now, delta); @@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) ktime_t rem; rem = hrtimer_expires_remaining(timer); - if (rem.tv64 <= 0) + if (rem <= 0) return 0; rmt = ktime_to_timespec(rem); @@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, * Optimize when a zero timeout value is given. It does not * matter whether this is an absolute or a relative time. */ - if (expires && !expires->tv64) { + if (expires && *expires == 0) { __set_current_state(TASK_RUNNING); return 0; } diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 2b9f45bc955d..8c89143f9ebf 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -14,7 +14,7 @@ #include <linux/hrtimer.h> #include <trace/events/timer.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /** * itimer_get_remtime - get remaining time for the timer @@ -34,10 +34,10 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer) * then we return 0 - which is correct. 
*/ if (hrtimer_active(timer)) { - if (rem.tv64 <= 0) - rem.tv64 = NSEC_PER_USEC; + if (rem <= 0) + rem = NSEC_PER_USEC; } else - rem.tv64 = 0; + rem = 0; return ktime_to_timeval(rem); } @@ -216,12 +216,12 @@ again: goto again; } expires = timeval_to_ktime(value->it_value); - if (expires.tv64 != 0) { + if (expires != 0) { tsk->signal->it_real_incr = timeval_to_ktime(value->it_interval); hrtimer_start(timer, expires, HRTIMER_MODE_REL); } else - tsk->signal->it_real_incr.tv64 = 0; + tsk->signal->it_real_incr = 0; trace_itimer_state(ITIMER_REAL, value, 0); spin_unlock_irq(&tsk->sighand->siglock); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 555e21f7b966..a4a0e478e44d 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -59,9 +59,9 @@ #define JIFFIES_SHIFT 8 #endif -static cycle_t jiffies_read(struct clocksource *cs) +static u64 jiffies_read(struct clocksource *cs) { - return (cycle_t) jiffies; + return (u64) jiffies; } static struct clocksource clocksource_jiffies = { diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 6df8927c58a5..edf19cc53140 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -381,7 +381,7 @@ ktime_t ntp_get_next_leap(void) if ((time_state == TIME_INS) && (time_status & STA_INS)) return ktime_set(ntp_next_leap_sec, 0); - ret.tv64 = KTIME_MAX; + ret = KTIME_MAX; return ret; } diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index f246763c9947..e9e8c10f0d9a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -6,7 +6,7 @@ #include <linux/posix-timers.h> #include <linux/errno.h> #include <linux/math64.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/kernel_stat.h> #include <trace/events/timer.h> #include <linux/tick.h> diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index f2826c35e918..1e6623d76750 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -36,7 +36,7 @@ #include <linux/time.h> #include <linux/mutex.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/list.h> #include <linux/init.h> #include <linux/compiler.h> @@ -359,7 +359,7 @@ static void schedule_next_timer(struct k_itimer *timr) { struct hrtimer *timer = &timr->it.real.timer; - if (timr->it.real.interval.tv64 == 0) + if (timr->it.real.interval == 0) return; timr->it_overrun += (unsigned int) hrtimer_forward(timer, @@ -449,7 +449,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); - if (timr->it.real.interval.tv64 != 0) + if (timr->it.real.interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { @@ -458,7 +458,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) * we will not get a call back to restart it AND * it should be restarted. */ - if (timr->it.real.interval.tv64 != 0) { + if (timr->it.real.interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* @@ -485,9 +485,9 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) */ #ifdef CONFIG_HIGH_RES_TIMERS { - ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ); + ktime_t kj = NSEC_PER_SEC / HZ; - if (timr->it.real.interval.tv64 < kj.tv64) + if (timr->it.real.interval < kj) now = ktime_add(now, kj); } #endif @@ -743,7 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) iv = timr->it.real.interval; /* interval timer ? 
*/ - if (iv.tv64) + if (iv) cur_setting->it_interval = ktime_to_timespec(iv); else if (!hrtimer_active(timer) && (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) @@ -756,13 +756,13 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) * timer move the expiry time forward by intervals, so * expiry is > now. */ - if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || - (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) + if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || + (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); remaining = __hrtimer_expires_remaining_adjusted(timer, now); /* Return 0 only, when the timer is expired and not pending */ - if (remaining.tv64 <= 0) { + if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when * it is expired ! @@ -839,7 +839,7 @@ common_timer_set(struct k_itimer *timr, int flags, common_timer_get(timr, old_setting); /* disable the timer */ - timr->it.real.interval.tv64 = 0; + timr->it.real.interval = 0; /* * careful here. If smp we could be in the "fire" routine which will * be spinning as we hold the lock. But this is ONLY an SMP issue. @@ -924,7 +924,7 @@ retry: static int common_timer_del(struct k_itimer *timer) { - timer->it.real.interval.tv64 = 0; + timer->it.real.interval = 0; if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0) return TIMER_RETRY; diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index 690b797f522e..a7bb8f33ae07 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -97,7 +97,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); if (clockevent_state_oneshot(&ce_broadcast_hrtimer)) - if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX) + if (ce_broadcast_hrtimer.next_event != KTIME_MAX) return HRTIMER_RESTART; return HRTIMER_NORESTART; diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index d2a20e83ebae..3109204c87cc 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -604,14 +604,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) bool bc_local; raw_spin_lock(&tick_broadcast_lock); - dev->next_event.tv64 = KTIME_MAX; - next_event.tv64 = KTIME_MAX; + dev->next_event = KTIME_MAX; + next_event = KTIME_MAX; cpumask_clear(tmpmask); now = ktime_get(); /* Find all expired events */ for_each_cpu(cpu, tick_broadcast_oneshot_mask) { td = &per_cpu(tick_cpu_device, cpu); - if (td->evtdev->next_event.tv64 <= now.tv64) { + if (td->evtdev->next_event <= now) { cpumask_set_cpu(cpu, tmpmask); /* * Mark the remote cpu in the pending mask, so @@ -619,8 +619,8 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) * timer in tick_broadcast_oneshot_control(). 
*/ cpumask_set_cpu(cpu, tick_broadcast_pending_mask); - } else if (td->evtdev->next_event.tv64 < next_event.tv64) { - next_event.tv64 = td->evtdev->next_event.tv64; + } else if (td->evtdev->next_event < next_event) { + next_event = td->evtdev->next_event; next_cpu = cpu; } } @@ -657,7 +657,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) * - There are pending events on sleeping CPUs which were not * in the event mask */ - if (next_event.tv64 != KTIME_MAX) + if (next_event != KTIME_MAX) tick_broadcast_set_event(dev, next_cpu, next_event); raw_spin_unlock(&tick_broadcast_lock); @@ -672,7 +672,7 @@ static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu) { if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER)) return 0; - if (bc->next_event.tv64 == KTIME_MAX) + if (bc->next_event == KTIME_MAX) return 0; return bc->bound_on == cpu ? -EBUSY : 0; } @@ -688,7 +688,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc, if (bc->features & CLOCK_EVT_FEAT_HRTIMER) { if (broadcast_needs_cpu(bc, smp_processor_id())) return; - if (dev->next_event.tv64 < bc->next_event.tv64) + if (dev->next_event < bc->next_event) return; } clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); @@ -754,7 +754,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) */ if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) { ret = -EBUSY; - } else if (dev->next_event.tv64 < bc->next_event.tv64) { + } else if (dev->next_event < bc->next_event) { tick_broadcast_set_event(bc, cpu, dev->next_event); /* * In case of hrtimer broadcasts the @@ -789,7 +789,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) /* * Bail out if there is no next event. */ - if (dev->next_event.tv64 == KTIME_MAX) + if (dev->next_event == KTIME_MAX) goto out; /* * If the pending bit is not set, then we are @@ -824,7 +824,7 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) * nohz fixups. */ now = ktime_get(); - if (dev->next_event.tv64 <= now.tv64) { + if (dev->next_event <= now) { cpumask_set_cpu(cpu, tick_broadcast_force_mask); goto out; } @@ -897,7 +897,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_next_period); tick_broadcast_set_event(bc, cpu, tick_next_period); } else - bc->next_event.tv64 = KTIME_MAX; + bc->next_event = KTIME_MAX; } else { /* * The first cpu which switches to oneshot mode sets diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 4fcd99e12aa0..49edc1c4f3e6 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -178,8 +178,8 @@ static void tick_setup_device(struct tick_device *td, struct clock_event_device *newdev, int cpu, const struct cpumask *cpumask) { - ktime_t next_event; void (*handler)(struct clock_event_device *) = NULL; + ktime_t next_event = 0; /* * First device setup ? 
@@ -195,7 +195,7 @@ static void tick_setup_device(struct tick_device *td, else tick_do_timer_cpu = TICK_DO_TIMER_NONE; tick_next_period = ktime_get(); - tick_period = ktime_set(0, NSEC_PER_SEC / HZ); + tick_period = NSEC_PER_SEC / HZ; } /* diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index b51344652330..6b009c207671 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -28,7 +28,7 @@ int tick_program_event(ktime_t expires, int force) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); - if (unlikely(expires.tv64 == KTIME_MAX)) { + if (unlikely(expires == KTIME_MAX)) { /* * We don't need the clock event device any more, stop it. */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 71496a20e670..2c115fdab397 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -58,21 +58,21 @@ static void tick_do_update_jiffies64(ktime_t now) * Do a quick check without holding jiffies_lock: */ delta = ktime_sub(now, last_jiffies_update); - if (delta.tv64 < tick_period.tv64) + if (delta < tick_period) return; /* Reevaluate with jiffies_lock held */ write_seqlock(&jiffies_lock); delta = ktime_sub(now, last_jiffies_update); - if (delta.tv64 >= tick_period.tv64) { + if (delta >= tick_period) { delta = ktime_sub(delta, tick_period); last_jiffies_update = ktime_add(last_jiffies_update, tick_period); /* Slow path for long timeouts */ - if (unlikely(delta.tv64 >= tick_period.tv64)) { + if (unlikely(delta >= tick_period)) { s64 incr = ktime_to_ns(tick_period); ticks = ktime_divns(delta, incr); @@ -101,7 +101,7 @@ static ktime_t tick_init_jiffy_update(void) write_seqlock(&jiffies_lock); /* Did we start the jiffies update yet ? */ - if (last_jiffies_update.tv64 == 0) + if (last_jiffies_update == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; write_sequnlock(&jiffies_lock); @@ -669,7 +669,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, /* Read jiffies and the time when jiffies were updated last */ do { seq = read_seqbegin(&jiffies_lock); - basemono = last_jiffies_update.tv64; + basemono = last_jiffies_update; basejiff = jiffies; } while (read_seqretry(&jiffies_lock, seq)); ts->last_jiffies = basejiff; @@ -697,7 +697,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, */ delta = next_tick - basemono; if (delta <= (u64)TICK_NSEC) { - tick.tv64 = 0; + tick = 0; /* * Tell the timer code that the base is not idle, i.e. 
undo @@ -764,10 +764,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, expires = KTIME_MAX; expires = min_t(u64, expires, next_tick); - tick.tv64 = expires; + tick = expires; /* Skip reprogram of event if its not changed */ - if (ts->tick_stopped && (expires == dev->next_event.tv64)) + if (ts->tick_stopped && (expires == dev->next_event)) goto out; /* @@ -864,7 +864,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) } if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { - ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; + ts->sleep_length = NSEC_PER_SEC / HZ; return false; } @@ -914,7 +914,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts) ts->idle_calls++; expires = tick_nohz_stop_sched_tick(ts, now, cpu); - if (expires.tv64 > 0LL) { + if (expires > 0LL) { ts->idle_sleeps++; ts->idle_expires = expires; } @@ -1051,7 +1051,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); - dev->next_event.tv64 = KTIME_MAX; + dev->next_event = KTIME_MAX; tick_sched_do_timer(now); tick_sched_handle(ts, regs); diff --git a/kernel/time/time.c b/kernel/time/time.c index bd62fb8e8e77..a3a9a8a029dc 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -38,7 +38,7 @@ #include <linux/math64.h> #include <linux/ptrace.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/unistd.h> #include <generated/timeconst.h> diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c index 4687b3104bae..8afd78932bdf 100644 --- a/kernel/time/timecounter.c +++ b/kernel/time/timecounter.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(timecounter_init); */ static u64 timecounter_read_delta(struct timecounter *tc) { - cycle_t cycle_now, cycle_delta; + u64 cycle_now, cycle_delta; u64 ns_offset; /* read cycle counter: */ @@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(timecounter_read); * time previous to the time stored in the cycle counter. 
*/ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, - cycle_t cycles, u64 mask, u64 frac) + u64 cycles, u64 mask, u64 frac) { u64 ns = (u64) cycles; @@ -90,7 +90,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, } u64 timecounter_cyc2time(struct timecounter *tc, - cycle_t cycle_tstamp) + u64 cycle_tstamp) { u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; u64 nsec = tc->nsec, frac = tc->frac; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index da233cdf89b0..db087d7e106d 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -104,7 +104,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) */ set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, -tk->wall_to_monotonic.tv_nsec); - WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); + WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); tk->wall_to_monotonic = wtm; set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); tk->offs_real = timespec64_to_ktime(tmp); @@ -119,10 +119,10 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) #ifdef CONFIG_DEBUG_TIMEKEEPING #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ -static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) +static void timekeeping_check_update(struct timekeeper *tk, u64 offset) { - cycle_t max_cycles = tk->tkr_mono.clock->max_cycles; + u64 max_cycles = tk->tkr_mono.clock->max_cycles; const char *name = tk->tkr_mono.clock->name; if (offset > max_cycles) { @@ -158,10 +158,10 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) } } -static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) +static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) { struct timekeeper *tk = &tk_core.timekeeper; - cycle_t now, last, mask, max, delta; + u64 now, last, mask, max, delta; unsigned int seq; /* @@ -199,12 +199,12 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) return delta; } #else -static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) +static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset) { } -static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) +static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) { - cycle_t cycle_now, delta; + u64 cycle_now, delta; /* read clocksource */ cycle_now = tkr->read(tkr->clock); @@ -229,7 +229,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) */ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) { - cycle_t interval; + u64 interval; u64 tmp, ntpinterval; struct clocksource *old_clock; @@ -254,7 +254,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) if (tmp == 0) tmp = 1; - interval = (cycle_t) tmp; + interval = (u64) tmp; tk->cycle_interval = interval; /* Go back from cycles -> shifted ns */ @@ -298,8 +298,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; static inline u32 arch_gettimeoffset(void) { return 0; } #endif -static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, - cycle_t delta) +static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta) { u64 nsec; @@ -312,16 +311,15 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) { - cycle_t delta; + u64 delta; delta = 
timekeeping_get_delta(tkr); return timekeeping_delta_to_ns(tkr, delta); } -static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, - cycle_t cycles) +static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles) { - cycle_t delta; + u64 delta; /* calculate the delta since the last update_wall_time */ delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); @@ -454,9 +452,9 @@ u64 notrace ktime_get_boot_fast_ns(void) EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); /* Suspend-time cycles value for halted fast timekeeper. */ -static cycle_t cycles_at_suspend; +static u64 cycles_at_suspend; -static cycle_t dummy_clock_read(struct clocksource *cs) +static u64 dummy_clock_read(struct clocksource *cs) { return cycles_at_suspend; } @@ -573,7 +571,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); static inline void tk_update_leap_state(struct timekeeper *tk) { tk->next_leap_ktime = ntp_get_next_leap(); - if (tk->next_leap_ktime.tv64 != KTIME_MAX) + if (tk->next_leap_ktime != KTIME_MAX) /* Convert to monotonic time */ tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); } @@ -650,7 +648,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) static void timekeeping_forward_now(struct timekeeper *tk) { struct clocksource *clock = tk->tkr_mono.clock; - cycle_t cycle_now, delta; + u64 cycle_now, delta; u64 nsec; cycle_now = tk->tkr_mono.read(clock); @@ -923,7 +921,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) ktime_t base_real; u64 nsec_raw; u64 nsec_real; - cycle_t now; + u64 now; WARN_ON_ONCE(timekeeping_suspended); @@ -982,8 +980,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) * interval is partial_history_cycles. */ static int adjust_historical_crosststamp(struct system_time_snapshot *history, - cycle_t partial_history_cycles, - cycle_t total_history_cycles, + u64 partial_history_cycles, + u64 total_history_cycles, bool discontinuity, struct system_device_crosststamp *ts) { @@ -1047,7 +1045,7 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history, /* * cycle_between - true if test occurs chronologically between before and after */ -static bool cycle_between(cycle_t before, cycle_t test, cycle_t after) +static bool cycle_between(u64 before, u64 test, u64 after) { if (test > before && test < after) return true; @@ -1077,7 +1075,7 @@ int get_device_system_crosststamp(int (*get_time_fn) { struct system_counterval_t system_counterval; struct timekeeper *tk = &tk_core.timekeeper; - cycle_t cycles, now, interval_start; + u64 cycles, now, interval_start; unsigned int clock_was_set_seq = 0; ktime_t base_real, base_raw; u64 nsec_real, nsec_raw; @@ -1138,7 +1136,7 @@ int get_device_system_crosststamp(int (*get_time_fn) * current interval */ if (do_interp) { - cycle_t partial_history_cycles, total_history_cycles; + u64 partial_history_cycles, total_history_cycles; bool discontinuity; /* @@ -1644,7 +1642,7 @@ void timekeeping_resume(void) struct clocksource *clock = tk->tkr_mono.clock; unsigned long flags; struct timespec64 ts_new, ts_delta; - cycle_t cycle_now; + u64 cycle_now; sleeptime_injected = false; read_persistent_clock64(&ts_new); @@ -2010,11 +2008,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) * * Returns the unconsumed cycles. 
*/ -static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, - u32 shift, - unsigned int *clock_set) +static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, + u32 shift, unsigned int *clock_set) { - cycle_t interval = tk->cycle_interval << shift; + u64 interval = tk->cycle_interval << shift; u64 raw_nsecs; /* If the offset is smaller than a shifted interval, do nothing */ @@ -2055,7 +2052,7 @@ void update_wall_time(void) { struct timekeeper *real_tk = &tk_core.timekeeper; struct timekeeper *tk = &shadow_timekeeper; - cycle_t offset; + u64 offset; int shift = 0, maxshift; unsigned int clock_set = 0; unsigned long flags; @@ -2253,7 +2250,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, } /* Handle leapsecond insertion adjustments */ - if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64)) + if (unlikely(base >= tk->next_leap_ktime)) *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); } while (read_seqcount_retry(&tk_core.seq, seq)); diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index 5be76270ec4a..9a18f121f399 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h @@ -13,9 +13,9 @@ extern void tk_debug_account_sleep_time(struct timespec64 *t); #endif #ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE -static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) +static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) { - cycle_t ret = (now - last) & mask; + u64 ret = (now - last) & mask; /* * Prevent time going backwards by checking the MSB of mask in @@ -24,7 +24,7 @@ static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) return ret & ~(mask >> 1) ? 0 : ret; } #else -static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) +static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) { return (now - last) & mask; } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index ea4fbf8477a9..ec33a6933eae 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,7 +43,7 @@ #include <linux/slab.h> #include <linux/compat.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/unistd.h> #include <asm/div64.h> #include <asm/timex.h> diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index ba7d8b288bb3..afe6cd1944fc 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -17,7 +17,7 @@ #include <linux/seq_file.h> #include <linux/kallsyms.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "tick-internal.h" diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 087204c733eb..afddded947df 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -43,7 +43,7 @@ #include <linux/seq_file.h> #include <linux/kallsyms.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> /* * This is our basic unit of interest: a timer expiry event identified diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1f0f547c54da..eb230f06ba41 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2847,7 +2847,7 @@ static void ftrace_shutdown_sysctl(void) } } -static cycle_t ftrace_update_time; +static u64 ftrace_update_time; unsigned long ftrace_update_tot_cnt; static inline int ops_traces_mod(struct ftrace_ops *ops) @@ -2894,7 +2894,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) { struct ftrace_page *pg; struct dyn_ftrace *p; - cycle_t start, 
stop; + u64 start, stop; unsigned long update_cnt = 0; unsigned long rec_flags = 0; int i; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 66f829c47bec..d7449783987a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -236,7 +236,7 @@ static int __init set_tracepoint_printk(char *str) } __setup("tp_printk", set_tracepoint_printk); -unsigned long long ns2usecs(cycle_t nsec) +unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); @@ -573,7 +573,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, return read; } -static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) +static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; @@ -587,7 +587,7 @@ static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) return ts; } -cycle_t ftrace_now(int cpu) +u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.trace_buffer, cpu); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index c2234494f40c..1ea51ab53edf 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -159,7 +159,7 @@ struct trace_array_cpu { unsigned long policy; unsigned long rt_priority; unsigned long skipped_entries; - cycle_t preempt_timestamp; + u64 preempt_timestamp; pid_t pid; kuid_t uid; char comm[TASK_COMM_LEN]; @@ -177,7 +177,7 @@ struct trace_buffer { struct trace_array *tr; struct ring_buffer *buffer; struct trace_array_cpu __percpu *data; - cycle_t time_start; + u64 time_start; int cpu; }; @@ -689,7 +689,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags, } #endif /* CONFIG_STACKTRACE */ -extern cycle_t ftrace_now(int cpu); +extern u64 ftrace_now(int cpu); extern void trace_find_cmdline(int pid, char comm[]); extern void trace_event_follow_fork(struct trace_array *tr, bool enable); @@ -736,7 +736,7 @@ extern int trace_selftest_startup_branch(struct tracer *trace, #endif /* CONFIG_FTRACE_STARTUP_TEST */ extern void *head_page(struct trace_array_cpu *data); -extern unsigned long long ns2usecs(cycle_t nsec); +extern unsigned long long ns2usecs(u64 nsec); extern int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); extern int diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 86654d7e1afe..7758bc0617cb 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -298,7 +298,7 @@ static void irqsoff_print_header(struct seq_file *s) /* * Should this new latency be reported/recorded? */ -static bool report_latency(struct trace_array *tr, cycle_t delta) +static bool report_latency(struct trace_array *tr, u64 delta) { if (tracing_thresh) { if (delta < tracing_thresh) @@ -316,7 +316,7 @@ check_critical_timing(struct trace_array *tr, unsigned long parent_ip, int cpu) { - cycle_t T0, T1, delta; + u64 T0, T1, delta; unsigned long flags; int pc; diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 5d0bb025bb21..ddec53b67646 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -358,7 +358,7 @@ static void wakeup_print_header(struct seq_file *s) /* * Should this new latency be reported/recorded? 
*/ -static bool report_latency(struct trace_array *tr, cycle_t delta) +static bool report_latency(struct trace_array *tr, u64 delta) { if (tracing_thresh) { if (delta < tracing_thresh) @@ -440,7 +440,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt, struct task_struct *prev, struct task_struct *next) { struct trace_array_cpu *data; - cycle_t T0, T1, delta; + u64 T0, T1, delta; unsigned long flags; long disabled; int cpu; diff --git a/kernel/uid16.c b/kernel/uid16.c index cc40793464e3..71645ae9303a 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -14,7 +14,7 @@ #include <linux/security.h> #include <linux/syscalls.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group) {
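A note on the kernel/time/, kernel/signal.c, and kernel/futex.c hunks above: they follow mechanically from ktime_t losing its union wrapper and becoming a plain s64 nanosecond count, so every .tv64 access and { .tv64 = ... } initializer collapses into direct arithmetic. An illustrative before/after sketch; clamp_expiry() is a hypothetical function, not from this diff:

#include <linux/ktime.h>

static ktime_t clamp_expiry(ktime_t expires, ktime_t now)
{
	if (expires == KTIME_MAX)		/* was: expires.tv64 == KTIME_MAX */
		return expires;
	if (ktime_sub(expires, now) < 0)	/* was: delta.tv64 < 0 */
		return 0;			/* was: expires.tv64 = 0 */
	return expires;
}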
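Likewise, the cycle_t to u64 conversions in the clocksource, timekeeping, and tracing files are purely textual: cycle_t was a typedef for u64, so the semantics are unchanged. For reference, the wrap-safe masked-delta idiom kept by clocksource_delta() above, restated as a standalone sketch:

#include <linux/types.h>

/* (now - last) wraps modulo 2^64; masking confines it to the counter width. */
static inline u64 cycle_delta(u64 now, u64 last, u64 mask)
{
	return (now - last) & mask;
}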