Diffstat (limited to 'arch/mips/include')
-rw-r--r--   arch/mips/include/asm/Kbuild          |   1
-rw-r--r--   arch/mips/include/asm/atomic.h        | 561
-rw-r--r--   arch/mips/include/asm/kvm_host.h      |  16
-rw-r--r--   arch/mips/include/asm/processor.h     |   6
-rw-r--r--   arch/mips/include/asm/suspend.h       |   7
-rw-r--r--   arch/mips/include/uapi/asm/ioctls.h   |   2
6 files changed, 203 insertions, 390 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 335e5290ec75..57012ef1f51e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += cputime.h
 generic-y += current.h
 generic-y += emergency-restart.h
 generic-y += hash.h
+generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mutex.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2befe651a..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 
 /*
  * atomic_set - set atomic variable
@@ -40,195 +40,103 @@
  */
 #define atomic_set(v, i)	((v)->counter = (i))
 
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	ll	%0, %1	# atomic_add	\n"
-		"	addu	%0, %2			\n"
-		"	sc	%0, %1			\n"
-		"	beqzl	%0, 1b			\n"
-		"	.set	mips0			\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	ll	%0, %1	# atomic_add	\n"
-			"	addu	%0, %2		\n"
-			"	sc	%0, %1		\n"
-			"	.set	mips0		\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter += i;
-		raw_local_irq_restore(flags);
-	}
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	ll	%0, %1	# atomic_sub	\n"
-		"	subu	%0, %2			\n"
-		"	sc	%0, %1			\n"
-		"	beqzl	%0, 1b			\n"
-		"	.set	mips0			\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	ll	%0, %1	# atomic_sub	\n"
-			"	subu	%0, %2		\n"
-			"	sc	%0, %1		\n"
-			"	.set	mips0		\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter -= i;
-		raw_local_irq_restore(flags);
-	}
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
-	int result;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	ll	%1, %2	# atomic_add_return	\n"
-		"	addu	%0, %1, %3		\n"
-		"	sc	%0, %2			\n"
-		"	beqzl	%0, 1b			\n"
-		"	addu	%0, %1, %3		\n"
-		"	.set	mips0			\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	ll	%1, %2	# atomic_add_return	\n"
-			"	addu	%0, %1, %3	\n"
-			"	sc	%0, %2		\n"
-			"	.set	mips0		\n"
-			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!result));
-
-		result = temp + i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result += i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static __inline__ void atomic_##op(int i, atomic_t * v)		\
+{									\
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
+		int temp;						\
+									\
+		__asm__ __volatile__(					\
+		"	.set	arch=r4000			\n"	\
+		"1:	ll	%0, %1	# atomic_" #op "	\n"	\
+		"	" #asm_op " %0, %2			\n"	\
+		"	sc	%0, %1				\n"	\
+		"	beqzl	%0, 1b				\n"	\
+		"	.set	mips0				\n"	\
+		: "=&r" (temp), "+m" (v->counter)			\
+		: "Ir" (i));						\
+	} else if (kernel_uses_llsc) {					\
+		int temp;						\
+									\
+		do {							\
+			__asm__ __volatile__(				\
+			"	.set	arch=r4000		\n"	\
+			"	ll	%0, %1	# atomic_" #op "\n"	\
+			"	" #asm_op " %0, %2		\n"	\
+			"	sc	%0, %1			\n"	\
+			"	.set	mips0			\n"	\
+			: "=&r" (temp), "+m" (v->counter)		\
+			: "Ir" (i));					\
+		} while (unlikely(!temp));				\
+	} else {							\
+		unsigned long flags;					\
+									\
+		raw_local_irq_save(flags);				\
+		v->counter c_op i;					\
+		raw_local_irq_restore(flags);				\
+	}								\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static __inline__ int atomic_##op##_return(int i, atomic_t * v)	\
+{									\
+	int result;							\
+									\
+	smp_mb__before_llsc();						\
+									\
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
+		int temp;						\
+									\
+		__asm__ __volatile__(					\
+		"	.set	arch=r4000			\n"	\
+		"1:	ll	%1, %2	# atomic_" #op "_return	\n"	\
+		"	" #asm_op " %0, %1, %3			\n"	\
+		"	sc	%0, %2				\n"	\
+		"	beqzl	%0, 1b				\n"	\
+		"	" #asm_op " %0, %1, %3			\n"	\
+		"	.set	mips0				\n"	\
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)	\
+		: "Ir" (i));						\
+	} else if (kernel_uses_llsc) {					\
+		int temp;						\
+									\
+		do {							\
+			__asm__ __volatile__(				\
+			"	.set	arch=r4000		\n"	\
+			"	ll	%1, %2	# atomic_" #op "_return	\n"	\
+			"	" #asm_op " %0, %1, %3		\n"	\
+			"	sc	%0, %2			\n"	\
+			"	.set	mips0			\n"	\
+			: "=&r" (result), "=&r" (temp), "+m" (v->counter)	\
+			: "Ir" (i));					\
+		} while (unlikely(!result));				\
+									\
+		result = temp; result c_op i;				\
+	} else {							\
+		unsigned long flags;					\
+									\
+		raw_local_irq_save(flags);				\
+		result = v->counter;					\
+		result c_op i;						\
+		v->counter = result;					\
+		raw_local_irq_restore(flags);				\
+	}								\
+									\
+	smp_llsc_mb();							\
+									\
+	return result;							\
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
-	int result;
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
 
-	smp_mb__before_llsc();
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	ll	%1, %2	# atomic_sub_return	\n"
-		"	subu	%0, %1, %3		\n"
-		"	sc	%0, %2			\n"
-		"	beqzl	%0, 1b			\n"
-		"	subu	%0, %1, %3		\n"
-		"	.set	mips0			\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
-
-		result = temp - i;
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	ll	%1, %2	# atomic_sub_return	\n"
-			"	subu	%0, %1, %3	\n"
-			"	sc	%0, %2		\n"
-			"	.set	mips0		\n"
-			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!result));
-
-		result = temp - i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result -= i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /*
  * atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @v: pointer of type atomic64_t
  *
  */
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 /*
  * atomic64_set - set atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define atomic64_set(v, i)	((v)->counter = (i))
 
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	lld	%0, %1	# atomic64_add	\n"
-		"	daddu	%0, %2			\n"
-		"	scd	%0, %1			\n"
-		"	beqzl	%0, 1b			\n"
-		"	.set	mips0			\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	lld	%0, %1	# atomic64_add	\n"
-			"	daddu	%0, %2		\n"
-			"	scd	%0, %1		\n"
-			"	.set	mips0		\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter += i;
-		raw_local_irq_restore(flags);
-	}
+#define ATOMIC64_OP(op, c_op, asm_op)					\
+static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+{									\
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
+		long temp;						\
+									\
+		__asm__ __volatile__(					\
+		"	.set	arch=r4000			\n"	\
+		"1:	lld	%0, %1	# atomic64_" #op "	\n"	\
+		"	" #asm_op " %0, %2			\n"	\
+		"	scd	%0, %1				\n"	\
+		"	beqzl	%0, 1b				\n"	\
+		"	.set	mips0				\n"	\
+		: "=&r" (temp), "+m" (v->counter)			\
+		: "Ir" (i));						\
+	} else if (kernel_uses_llsc) {					\
+		long temp;						\
+									\
+		do {							\
+			__asm__ __volatile__(				\
+			"	.set	arch=r4000		\n"	\
+			"	lld	%0, %1	# atomic64_" #op "\n"	\
+			"	" #asm_op " %0, %2		\n"	\
+			"	scd	%0, %1			\n"	\
+			"	.set	mips0			\n"	\
+			: "=&r" (temp), "+m" (v->counter)		\
+			: "Ir" (i));					\
+		} while (unlikely(!temp));				\
+	} else {							\
+		unsigned long flags;					\
+									\
+		raw_local_irq_save(flags);				\
+		v->counter c_op i;					\
+		raw_local_irq_restore(flags);				\
+	}								\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				\
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
+{									\
+	long result;							\
+									\
+	smp_mb__before_llsc();						\
+									\
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
+		long temp;						\
+									\
+		__asm__ __volatile__(					\
+		"	.set	arch=r4000			\n"	\
+		"1:	lld	%1, %2	# atomic64_" #op "_return\n"	\
+		"	" #asm_op " %0, %1, %3			\n"	\
+		"	scd	%0, %2				\n"	\
+		"	beqzl	%0, 1b				\n"	\
+		"	" #asm_op " %0, %1, %3			\n"	\
+		"	.set	mips0				\n"	\
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)	\
+		: "Ir" (i));						\
+	} else if (kernel_uses_llsc) {					\
+		long temp;						\
+									\
+		do {							\
+			__asm__ __volatile__(				\
+			"	.set	arch=r4000		\n"	\
+			"	lld	%1, %2	# atomic64_" #op "_return\n"	\
+			"	" #asm_op " %0, %1, %3		\n"	\
+			"	scd	%0, %2			\n"	\
+			"	.set	mips0			\n"	\
+			: "=&r" (result), "=&r" (temp), "=m" (v->counter)	\
+			: "Ir" (i), "m" (v->counter)			\
+			: "memory");					\
+		} while (unlikely(!result));				\
+									\
+		result = temp; result c_op i;				\
+	} else {							\
+		unsigned long flags;					\
+									\
+		raw_local_irq_save(flags);				\
+		result = v->counter;					\
+		result c_op i;						\
+		v->counter = result;					\
+		raw_local_irq_restore(flags);				\
+	}								\
+									\
+	smp_llsc_mb();							\
+									\
+	return result;							\
 }
 
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	lld	%0, %1	# atomic64_sub	\n"
-		"	dsubu	%0, %2			\n"
-		"	scd	%0, %1			\n"
-		"	beqzl	%0, 1b			\n"
-		"	.set	mips0			\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	lld	%0, %1	# atomic64_sub	\n"
-			"	dsubu	%0, %2		\n"
-			"	scd	%0, %1		\n"
-			"	.set	mips0		\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter -= i;
-		raw_local_irq_restore(flags);
-	}
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-	long result;
+#define ATOMIC64_OPS(op, c_op, asm_op)					\
+	ATOMIC64_OP(op, c_op, asm_op)					\
+	ATOMIC64_OP_RETURN(op, c_op, asm_op)
 
-	smp_mb__before_llsc();
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	lld	%1, %2	# atomic64_add_return	\n"
-		"	daddu	%0, %1, %3		\n"
-		"	scd	%0, %2			\n"
-		"	beqzl	%0, 1b			\n"
-		"	daddu	%0, %1, %3		\n"
-		"	.set	mips0			\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	lld	%1, %2	# atomic64_add_return	\n"
-			"	daddu	%0, %1, %3	\n"
-			"	scd	%0, %2		\n"
-			"	.set	mips0		\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
-		} while (unlikely(!result));
-
-		result = temp + i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result += i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-	long result;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000		\n"
-		"1:	lld	%1, %2	# atomic64_sub_return	\n"
-		"	dsubu	%0, %1, %3		\n"
-		"	scd	%0, %2			\n"
-		"	beqzl	%0, 1b			\n"
-		"	dsubu	%0, %1, %3		\n"
-		"	.set	mips0			\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	arch=r4000	\n"
-			"	lld	%1, %2	# atomic64_sub_return	\n"
-			"	dsubu	%0, %1, %3	\n"
-			"	scd	%0, %2		\n"
-			"	.set	mips0		\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
-		} while (unlikely(!result));
-
-		result = temp - i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result -= i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 /*
  * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 7a3fc67bd7f9..f2c249796ea8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -96,11 +96,6 @@
 #define CAUSEB_DC			27
 #define CAUSEF_DC			(_ULCAST_(1) << 27)
 
-struct kvm;
-struct kvm_run;
-struct kvm_vcpu;
-struct kvm_interrupt;
-
 extern atomic_t kvm_mips_instance;
 extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
 extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
@@ -767,5 +762,16 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
+static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 #endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 05f08438a7c4..f1df4cb4a286 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define ARCH_HAS_PREFETCHW
 #define prefetchw(x)	__builtin_prefetch((x), 1, 1)
 
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
 #endif
 
 #endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 3adac3b53d19..000000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SUSPEND_H
-#define __ASM_SUSPEND_H
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-#endif /* __ASM_SUSPEND_H */
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index b1e637757fe3..740219c2c894 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -81,6 +81,8 @@
 #define TCSETS2		_IOW('T', 0x2B, struct termios2)
 #define TCSETSW2	_IOW('T', 0x2C, struct termios2)
 #define TCSETSF2	_IOW('T', 0x2D, struct termios2)
+#define TIOCGRS485	_IOR('T', 0x2E, struct serial_rs485)
+#define TIOCSRS485	_IOWR('T', 0x2F, struct serial_rs485)
 #define TIOCGPTN	_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
 #define TIOCGDEV	_IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
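The atomic.h change above replaces four hand-written functions (atomic_add, atomic_sub, atomic_add_return, atomic_sub_return, plus their 64-bit counterparts) with ATOMIC_OP()/ATOMIC_OP_RETURN() generator macros, instantiated through ATOMIC_OPS(add, +=, addu) and ATOMIC_OPS(sub, -=, subu). Below is a minimal user-space sketch of the same generation pattern. It is not the kernel code: the MIPS LL/SC assembly is swapped for GCC __atomic builtins purely so the expansion can be compiled and exercised on its own.

/* Hypothetical user-space illustration of the ATOMIC_OP/ATOMIC_OP_RETURN
 * pattern this patch introduces.  The kernel versions emit MIPS LL/SC
 * assembly; GCC's __atomic builtins stand in here. */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_fetch_##op(&v->counter, i, __ATOMIC_RELAXED);		\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_fetch(&v->counter, i, __ATOMIC_SEQ_CST);\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)	/* generates atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)	/* generates atomic_sub() and atomic_sub_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
	atomic_t v = { 0 };

	atomic_add(5, &v);
	printf("%d\n", atomic_sub_return(2, &v));	/* prints 3 */
	return 0;
}

Expanding ATOMIC_OPS(add, +=) produces atomic_add() and atomic_add_return(), the same API shape the removed open-coded functions had; in the kernel the single macro body still carries all three variants (R10000 LL/SC workaround, plain LL/SC, and the IRQ-disable fallback).

The ioctls.h hunk wires the generic RS-485 ioctl numbers ('T' 0x2E/0x2F) into the MIPS uapi. A hedged user-space usage sketch follows; the device path is a placeholder, and the underlying serial driver must actually implement RS-485 support for these calls to succeed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>	/* struct serial_rs485, SER_RS485_ENABLED */

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* placeholder port */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	if (ioctl(fd, TIOCGRS485, &rs485) < 0) {	/* read current RS-485 state */
		perror("TIOCGRS485");
		close(fd);
		return 1;
	}

	rs485.flags |= SER_RS485_ENABLED;		/* driver toggles RTS around TX */
	rs485.delay_rts_before_send = 0;
	rs485.delay_rts_after_send = 0;

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)		/* apply the new state */
		perror("TIOCSRS485");

	close(fd);
	return 0;
}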