author     Chris Metcalf <cmetcalf@tilera.com>    2011-05-04 14:38:26 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2011-05-12 15:52:12 -0400
commit     18aecc2b645bbb07851b196452a2af314222069b
tree       959f765f69af01046c6e26db12b45c3390799d3e /arch/tile/include/asm/spinlock_64.h
parent     be84cb43833ee40a42e08f5425d20310f16229c7
arch/tile: finish enabling support for TILE-Gx 64-bit chip
This support was partially present in the existing code (look for
"__tilegx__" ifdefs) but with this change you can build a working
kernel using the TILE-Gx toolchain and ARCH=tilegx.
Most of these files are new, generally adding a foo_64.c file
where previously there was just a foo_32.c file.
The ARCH=tilegx directive redirects to arch/tile, not arch/tilegx,
using the existing SRCARCH mechanism in the top-level Makefile.
Changes to existing files:
- <asm/bitops.h> and <asm/bitops_32.h> changed to factor the
include of <asm-generic/bitops/non-atomic.h> into the common
header (a sketch of the result follows this list).
- <asm/compat.h> and arch/tile/kernel/compat.c changed to remove
the "const" markers I had put on compat_sys_execve() when trying
to match some recent similar changes to the non-compat execve.
It turns out the compat version wasn't "upgraded" to use const.
- <asm/opcode-tile_64.h> and <asm/opcode_constants_64.h> were
previously included accidentally, with the 32-bit contents. Now
they have the proper 64-bit contents.
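As a sketch of the bitops factoring mentioned in the first item (the
exact layout is assumed here, not copied from the tree, beyond the
"__tilegx__" selector this commit uses elsewhere), the common header
now reads roughly:

/* arch/tile/include/asm/bitops.h -- sketch, surrounding code elided */
#ifdef __tilegx__
#include <asm/bitops_64.h>
#else
#include <asm/bitops_32.h>
#endif

/* Word-size independent, so pulled in once here rather than by each
 * of the _32/_64 headers. */
#include <asm-generic/bitops/non-atomic.h>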
Finally, I had to hack the existing hacky drivers/input/input-compat.h
to add yet another "#ifdef" for INPUT_COMPAT_TEST (same as x86_64).
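For context, the ladder in question has roughly this shape (trimmed;
the tile config symbol shown is an assumption, not copied from the
patch, but per the note above its test is is_compat_task(), same as
the x86_64 arm):

/* drivers/input/input-compat.h -- sketch of the #ifdef ladder */
#ifdef CONFIG_COMPAT

#ifdef CONFIG_X86_64
# define INPUT_COMPAT_TEST is_compat_task()
#elif defined(CONFIG_TILEGX)            /* hypothetical symbol for the new arm */
# define INPUT_COMPAT_TEST is_compat_task()
#else
# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT)
#endif

#endif /* CONFIG_COMPAT */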
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com> [drivers/input]
Diffstat (limited to 'arch/tile/include/asm/spinlock_64.h')
-rw-r--r--   arch/tile/include/asm/spinlock_64.h   161
1 file changed, 161 insertions(+), 0 deletions(-)
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
new file mode 100644
index 000000000000..72be5904e020
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+#ifndef _ASM_TILE_SPINLOCK_64_H
+#define _ASM_TILE_SPINLOCK_64_H
+
+/* Shifts and masks for the various fields in "lock". */
+#define __ARCH_SPIN_CURRENT_SHIFT	17
+#define __ARCH_SPIN_NEXT_MASK		0x7fff
+#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000
+
+/*
+ * Return the "current" portion of a ticket lock value,
+ * i.e. the number that currently owns the lock.
+ */
+static inline int arch_spin_current(u32 val)
+{
+	return val >> __ARCH_SPIN_CURRENT_SHIFT;
+}
+
+/*
+ * Return the "next" portion of a ticket lock value,
+ * i.e. the number that the next task to try to acquire the lock will get.
+ */
+static inline int arch_spin_next(u32 val)
+{
+	return val & __ARCH_SPIN_NEXT_MASK;
+}
+
+/* The lock is locked if a task would have to wait to get it. */
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	u32 val = lock->lock;
+	return arch_spin_current(val) != arch_spin_next(val);
+}
+
+/* Bump the current ticket so the next task owns the lock. */
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	wmb();  /* guarantee anything modified under the lock is visible */
+	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
+}
+
+void arch_spin_unlock_wait(arch_spinlock_t *lock);
+
+void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
+
+/* Grab the "next" ticket number and bump it atomically.
+ * If the current ticket is not ours, go to the slow path.
+ * We also take the slow path if the "next" value overflows.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	u32 val = __insn_fetchadd4(&lock->lock, 1);
+	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
+	if (unlikely(arch_spin_current(val) != ticket))
+		arch_spin_lock_slow(lock, ticket);
+}
+
+/* Try to get the lock, and return whether we succeeded. */
+int arch_spin_trylock(arch_spinlock_t *lock);
+
+/* We cannot take an interrupt after getting a ticket, so don't enable them. */
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * We use fetchadd() for readers, and fetchor() with the sign bit
+ * for writers.
+ */
+
+#define __WRITE_LOCK_BIT (1 << 31)
+
+static inline int arch_write_val_locked(int val)
+{
+	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
+}
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+	return !arch_write_val_locked(rw->lock);
+}
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+	return rw->lock == 0;
+}
+
+extern void __read_lock_failed(arch_rwlock_t *rw);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
+	if (unlikely(arch_write_val_locked(val)))
+		__read_lock_failed(rw);
+}
+
+extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
+	if (unlikely(val != 0))
+		__write_lock_failed(rw, val);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	__insn_mf();
+	__insn_fetchadd4(&rw->lock, -1);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	__insn_mf();
+	rw->lock = 0;
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
+	if (likely(val == 0))
+		return 1;
+	if (!arch_write_val_locked(val))
+		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
+	return 0;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#endif /* _ASM_TILE_SPINLOCK_64_H */
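To make the ticket discipline above concrete, here is a minimal
user-space walk-through of the fast paths. It is a sketch, not kernel
code: __insn_fetchadd4() is a TILE-Gx intrinsic, so GCC's
__atomic_fetch_add() stands in for it, and the slow path, the
NEXT_OVERFLOW handling, and arch_spin_unlock_wait() are all omitted.

/* ticket.c: single-threaded demo of the ticket-lock arithmetic.
 * Build with: gcc -O2 ticket.c */
#include <stdint.h>
#include <stdio.h>

#define CURRENT_SHIFT 17
#define NEXT_MASK     0x7fff

static uint32_t lock;   /* bits 31:17 = current owner, bits 14:0 = next ticket */

static int current_of(uint32_t v) { return v >> CURRENT_SHIFT; }
static int next_of(uint32_t v)    { return v & NEXT_MASK; }

static void lock_it(void)
{
	/* fetchadd4 analogue: take a ticket by bumping "next". */
	uint32_t old = __atomic_fetch_add(&lock, 1, __ATOMIC_ACQUIRE);
	/* Wait until "current" reaches our ticket; the kernel's slow
	 * path backs off here instead of spinning hot. */
	while (current_of(__atomic_load_n(&lock, __ATOMIC_ACQUIRE)) != next_of(old))
		;
}

static void unlock_it(void)
{
	/* Bumping "current" hands the lock to the next ticket holder. */
	__atomic_fetch_add(&lock, 1u << CURRENT_SHIFT, __ATOMIC_RELEASE);
}

int main(void)
{
	lock_it();
	printf("locked:   current=%d next=%d\n", current_of(lock), next_of(lock));
	unlock_it();
	printf("unlocked: current=%d next=%d\n", current_of(lock), next_of(lock));
	return 0;
}

Running it prints current=0 next=1 while held and current=1 next=1
after release, which is exactly the inequality arch_spin_is_locked()
tests above.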
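The rwlock fast paths rest on two TILE-Gx intrinsics with no portable
equivalent: __insn_fetchaddgez4() (fetch-and-add, but only if the
result stays non-negative) and __insn_fetchor4(). The sketch below
emulates fetchaddgez with a compare-exchange loop purely to show the
semantics; the hardware instruction is precisely what lets the kernel
avoid such a loop. Names and memory orders are illustrative, not from
the patch.

/* rwlock.c: single-threaded demo of the rwlock fast paths.
 * Build with: gcc -O2 rwlock.c */
#include <stdint.h>
#include <stdio.h>

#define WRITE_LOCK_BIT INT32_MIN   /* sign bit, as in __WRITE_LOCK_BIT */

static int32_t rwlock;  /* sign bit = writer, low bits = reader count */

/* Emulation of fetchaddgez4: add adj only if the result would stay
 * non-negative; return the old value either way. */
static int32_t fetchaddgez(int32_t *p, int32_t adj)
{
	int32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);
	while (old + adj >= 0 &&
	       !__atomic_compare_exchange_n(p, &old, old + adj, 0,
					    __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
		;  /* a failed CAS reloads old; retry */
	return old;
}

static int read_trylock(void)
{
	/* A negative old value means a writer holds the lock, and the
	 * gez condition kept our increment from happening at all. */
	return fetchaddgez(&rwlock, 1) >= 0;
}

static void read_unlock(void)
{
	__atomic_fetch_add(&rwlock, -1, __ATOMIC_RELEASE);
}

static int write_trylock(void)
{
	int32_t old = __atomic_fetch_or(&rwlock, WRITE_LOCK_BIT, __ATOMIC_ACQUIRE);
	if (old == 0)
		return 1;          /* we set the bit on an idle lock */
	if (old >= 0)              /* readers, not another writer, hold it: */
		__atomic_fetch_and(&rwlock, ~WRITE_LOCK_BIT, __ATOMIC_RELEASE);
	return 0;                  /* ...so undo our speculative fetchor */
}

int main(void)
{
	printf("read_trylock:  %d\n", read_trylock());   /* 1: no writer */
	printf("write_trylock: %d\n", write_trylock());  /* 0: a reader holds it */
	read_unlock();
	printf("write_trylock: %d\n", write_trylock());  /* 1: lock was idle */
	return 0;
}

Note how write_trylock() mirrors arch_write_trylock() above: a failed
attempt against readers must fetchand the sign bit back out, because
the speculative fetchor already published it.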