author    | Tejun Heo <tj@kernel.org> | 2010-01-05 09:17:33 +0900
committer | Tejun Heo <tj@kernel.org> | 2010-01-05 09:17:33 +0900
commit    | 32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree      | b1ce838a37044bb38dfc128e2116ca35630e629a /arch/sh/include/asm
parent    | 22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent    | c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'arch/sh/include/asm')
44 files changed, 513 insertions, 621 deletions
diff --git a/arch/sh/include/asm/.gitignore b/arch/sh/include/asm/.gitignore deleted file mode 100644 index 378db779fb6c..000000000000 --- a/arch/sh/include/asm/.gitignore +++ /dev/null @@ -1 +0,0 @@ -machtypes.h diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 80d40813e057..99d6b3ecbe22 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -28,9 +28,6 @@ /* Returns the privileged segment base of a given address */ #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) -/* Returns the physical address of a PnSEG (n=1,2) address */ -#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) - #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) /* * Map an address to a certain privileged segment @@ -60,5 +57,11 @@ #define P3_ADDR_MAX P4SEG #endif +#ifndef __ASSEMBLY__ +#ifdef CONFIG_PMB +extern int __in_29bit_mode(void); +#endif /* CONFIG_PMB */ +#endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/sh/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index e8e78137c6f5..b16388d71954 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -78,11 +78,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -/* Atomic operations are already serializing on SH */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() #include <asm-generic/atomic-long.h> #include <asm-generic/atomic64.h> diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index ebe595b7ab1f..98511e4d28cb 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -26,8 +26,8 @@ /* * clear_bit() doesn't provide any barrier for the compiler. 
*/ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 46260fcbdf4b..02a19a1c033a 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -14,11 +14,15 @@ #include <asm/processor.h> +extern void select_idle_routine(void); + static void __init check_bugs(void) { extern unsigned long loops_per_jiffy; char *p = &init_utsname()->machine[2]; /* "sh" */ + select_idle_routine(); + current_cpu_data.loops_per_jiffy = loops_per_jiffy; switch (current_cpu_data.family) { diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index c29918f3c819..dda96eb3e7c0 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_page(struct vm_area_struct *vma, diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 69d56dd4c968..87ced133a363 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,219 +1,108 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -#include <linux/mm.h> -#include <linux/scatterlist.h> -#include <linux/dma-debug.h> -#include <asm/cacheflush.h> -#include <asm/io.h> +extern struct dma_map_ops *dma_ops; +extern void no_iommu_init(void); + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return dma_ops; +} + #include <asm-generic/dma-coherent.h> +#include <asm-generic/dma-mapping-common.h> + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); -extern struct bus_type pci_bus_type; + if (ops->dma_supported) + return ops->dma_supported(dev, mask); -#define dma_supported(dev, mask) (1) + return 1; +} static inline int dma_set_mask(struct device *dev, u64 mask) { + struct dma_map_ops *ops = get_dma_ops(dev); + if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); *dev->dma_mask = mask; return 0; } -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) - -static inline dma_addr_t dma_map_single(struct device *dev, - void *ptr, size_t size, - enum dma_data_direction dir) -{ - dma_addr_t addr = virt_to_phys(ptr); - -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return addr; -#endif - dma_cache_sync(dev, ptr, size, dir); - - debug_dma_map_page(dev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, addr, true); - - return addr; -} - -static 
inline void dma_unmap_single(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, addr, size, dir, true); -} -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); +#ifdef CONFIG_DMA_COHERENT +#define dma_is_consistent(d, h) (1) +#else +#define dma_is_consistent(d, h) (0) #endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } - debug_dma_map_sg(dev, sg, nents, i, dir); - - return nents; -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - debug_dma_unmap_sg(dev, sg, nents, dir); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - return dma_map_single(dev, page_address(page) + offset, size, dir); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction dir) -{ - dma_unmap_single(dev, dma_address, size, dir); -} - -static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) +static inline int dma_get_cache_alignment(void) { -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir); + /* + * Each processor family will define its own L1_CACHE_SHIFT, + * L1_CACHE_BYTES wraps to this, so this is always safe. + */ + return L1_CACHE_BYTES; } -static inline void dma_sync_single_range(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { -#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) - if (dev->bus == &pci_bus_type) - return; -#endif - dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); -static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - int i; + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); - for (i = 0; i < nelems; i++) { -#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); -#endif - sg[i].dma_address = sg_phys(&sg[i]); - sg[i].dma_length = sg[i].length; - } + return dma_addr == 0; } -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir) +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) { - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction dir) -{ - __dma_sync_single(dev, dma_handle, size, dir); - debug_dma_sync_single_for_device(dev, dma_handle, size, dir); -} + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) + return memory; + if (!ops->alloc_coherent) + return NULL; -static 
inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_cpu(dev, dma_handle, - offset, size, direction); -} + memory = ops->alloc_coherent(dev, size, dma_handle, gfp); + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); - debug_dma_sync_single_range_for_device(dev, dma_handle, - offset, size, direction); + return memory; } - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) +static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - __dma_sync_sg(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); -} + WARN_ON(irqs_disabled()); /* for portability */ -static inline int dma_get_cache_alignment(void) -{ - /* - * Each processor family will define its own L1_CACHE_SHIFT, - * L1_CACHE_BYTES wraps to this, so this is always safe. - */ - return L1_CACHE_BYTES; -} + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == 0; + debug_dma_free_coherent(dev, size, vaddr, dma_handle); + if (ops->free_coherent) + ops->free_coherent(dev, size, vaddr, dma_handle); } -#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY - -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); - -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +/* arch/sh/mm/consistent.c */ +extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t flag); +extern void dma_generic_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index 04ad0e1e637e..07373a074090 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h @@ -19,9 +19,11 @@ #include <asm-generic/dma.h> #ifdef CONFIG_NR_DMA_CHANNELS -# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS) +#elif defined(CONFIG_NR_ONCHIP_DMA_CHANNELS) +# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) #else -# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS) +# define MAX_DMA_CHANNELS 0 #endif /* diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index ced6795891a6..bdccbbfdc0bd 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -194,6 +194,12 @@ #define DWARF_ARCH_RA_REG 17 #ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/bug.h> +#include <linux/list.h> +#include <linux/module.h> + /* * 
Read either the frame pointer (r14) or the stack pointer (r15). * NOTE: this MUST be inlined. @@ -241,6 +247,12 @@ struct dwarf_cie { unsigned long flags; #define DWARF_CIE_Z_AUGMENTATION (1 << 0) + + /* + * 'mod' will be non-NULL if this CIE came from a module's + * .eh_frame section. + */ + struct module *mod; }; /** @@ -255,6 +267,12 @@ struct dwarf_fde { unsigned char *instructions; unsigned char *end; struct list_head link; + + /* + * 'mod' will be non-NULL if this FDE came from a module's + * .eh_frame section. + */ + struct module *mod; }; /** @@ -364,6 +382,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); +extern void dwarf_free_frame(struct dwarf_frame *); + +extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +extern void module_dwarf_cleanup(struct module *); + #endif /* !__ASSEMBLY__ */ #define CFI_STARTPROC .cfi_startproc @@ -391,6 +415,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, static inline void dwarf_unwinder_init(void) { } + +#define module_dwarf_finalize(hdr, sechdrs, me) (0) +#define module_dwarf_cleanup(mod) do { } while (0) + #endif #endif /* CONFIG_DWARF_UNWINDER */ diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index ccb1d93bb043..ac04255022b6 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t; */ #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 721fcc4d5e98..5ac1e40a511c 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -14,9 +14,9 @@ #define _ASM_FIXMAP_H #include <linux/kernel.h> +#include <linux/threads.h> #include <asm/page.h> #ifdef CONFIG_HIGHMEM -#include <linux/threads.h> #include <asm/kmap_types.h> #endif @@ -46,9 +46,15 @@ * fix-mapped? */ enum fixed_addresses { -#define FIX_N_COLOURS 16 + /* + * The FIX_CMAP entries are used by kmap_coherent() to get virtual + * addresses which are of a known color, and so their values are + * important. __fix_to_virt(FIX_CMAP_END - n) must give an address + * which is the same color as a page (n<<PAGE_SHIFT). 
+ */ +#define FIX_N_COLOURS 8 FIX_CMAP_BEGIN, - FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1, FIX_UNCACHED, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 1d3aee04b5cc..fb6bbb9b1cc8 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -18,16 +18,15 @@ static inline void grab_fpu(struct pt_regs *regs) struct task_struct; -extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); +extern void save_fpu(struct task_struct *__tsk); +void fpu_state_restore(struct pt_regs *regs); #else +#define save_fpu(tsk) do { } while (0) #define release_fpu(regs) do { } while (0) #define grab_fpu(regs) do { } while (0) +#define fpu_state_restore(regs) do { } while (0) -static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs) -{ - clear_tsk_thread_flag(tsk, TIF_USEDFPU); -} #endif struct user_regset; @@ -39,19 +38,28 @@ extern int fpregs_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf); +static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) +{ + if (task_thread_info(tsk)->status & TS_USEDFPU) { + task_thread_info(tsk)->status &= ~TS_USEDFPU; + save_fpu(tsk); + release_fpu(regs); + } else + tsk->fpu_counter = 0; +} + static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) { preempt_disable(); - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) - save_fpu(tsk, regs); + __unlazy_fpu(tsk, regs); preempt_enable(); } static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) { preempt_disable(); - if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { - clear_tsk_thread_flag(tsk, TIF_USEDFPU); + if (task_thread_info(tsk)->status & TS_USEDFPU) { + task_thread_info(tsk)->status &= ~TS_USEDFPU; release_fpu(regs); } preempt_enable(); diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 12f3a31f20af..13e9966464c2 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ +#ifndef __ASSEMBLY__ + +/* arch/sh/kernel/return_address.c */ +extern void *return_address(unsigned int); + +#define HAVE_ARCH_CALLER_ADDR + +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#define CALLER_ADDR1 ((unsigned long)return_address(1)) +#define CALLER_ADDR2 ((unsigned long)return_address(2)) +#define CALLER_ADDR3 ((unsigned long)return_address(3)) +#define CALLER_ADDR4 ((unsigned long)return_address(4)) +#define CALLER_ADDR5 ((unsigned long)return_address(5)) +#define CALLER_ADDR6 ((unsigned long)return_address(6)) + +#endif /* __ASSEMBLY__ */ + #endif /* __ASM_SH_FTRACE_H */ diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 61f93da2c62e..f8d9a731e903 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h @@ -20,7 +20,7 @@ #endif #define ARCH_NR_GPIOS 512 -#include <asm-generic/gpio.h> +#include <linux/sh_pfc.h> #ifdef CONFIG_GPIOLIB @@ -53,84 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) #endif /* CONFIG_GPIOLIB */ -typedef unsigned short pinmux_enum_t; -typedef unsigned short pinmux_flag_t; - -#define PINMUX_TYPE_NONE 0 -#define PINMUX_TYPE_FUNCTION 1 -#define PINMUX_TYPE_GPIO 2 -#define PINMUX_TYPE_OUTPUT 3 -#define PINMUX_TYPE_INPUT 4 -#define 
PINMUX_TYPE_INPUT_PULLUP 5 -#define PINMUX_TYPE_INPUT_PULLDOWN 6 - -#define PINMUX_FLAG_TYPE (0x7) -#define PINMUX_FLAG_WANT_PULLUP (1 << 3) -#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) - -#define PINMUX_FLAG_DBIT_SHIFT 5 -#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) -#define PINMUX_FLAG_DREG_SHIFT 10 -#define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) - -struct pinmux_gpio { - pinmux_enum_t enum_id; - pinmux_flag_t flags; -}; - -#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark } -#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 - -struct pinmux_cfg_reg { - unsigned long reg, reg_width, field_width; - unsigned long *cnt; - pinmux_enum_t *enum_ids; -}; - -#define PINMUX_CFG_REG(name, r, r_width, f_width) \ - .reg = r, .reg_width = r_width, .field_width = f_width, \ - .cnt = (unsigned long [r_width / f_width]) {}, \ - .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \ - -struct pinmux_data_reg { - unsigned long reg, reg_width, reg_shadow; - pinmux_enum_t *enum_ids; -}; - -#define PINMUX_DATA_REG(name, r, r_width) \ - .reg = r, .reg_width = r_width, \ - .enum_ids = (pinmux_enum_t [r_width]) \ - -struct pinmux_range { - pinmux_enum_t begin; - pinmux_enum_t end; - pinmux_enum_t force; -}; - -struct pinmux_info { - char *name; - pinmux_enum_t reserved_id; - struct pinmux_range data; - struct pinmux_range input; - struct pinmux_range input_pd; - struct pinmux_range input_pu; - struct pinmux_range output; - struct pinmux_range mark; - struct pinmux_range function; - - unsigned first_gpio, last_gpio; - - struct pinmux_gpio *gpios; - struct pinmux_cfg_reg *cfg_regs; - struct pinmux_data_reg *data_regs; - - pinmux_enum_t *gpio_data; - unsigned int gpio_data_size; - - unsigned long *gpio_in_use; - struct gpio_chip chip; -}; - -int register_pinmux(struct pinmux_info *pip); - #endif /* __ASM_SH_GPIO_H */ diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index a5be4afa790b..48b191313a99 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -1,9 +1,16 @@ #ifndef __ASM_SH_HARDIRQ_H #define __ASM_SH_HARDIRQ_H -extern void ack_bad_irq(unsigned int irq); -#define ack_bad_irq ack_bad_irq +#include <linux/threads.h> +#include <linux/irq.h> + +typedef struct { + unsigned int __softirq_pending; + unsigned int __nmi_count; /* arch dependent */ +} ____cacheline_aligned irq_cpustat_t; -#include <asm-generic/hardirq.h> +#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ + +extern void ack_bad_irq(unsigned int irq); #endif /* __ASM_SH_HARDIRQ_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 5be45ea4dfec..026dd659a640 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -90,15 +90,11 @@ #define ctrl_outl __raw_writel #define ctrl_outq __raw_writeq +extern unsigned long generic_io_base; + static inline void ctrl_delay(void) { -#ifdef CONFIG_CPU_SH4 - __raw_readw(CCN_PVR); -#elif defined(P2SEG) - __raw_readw(P2SEG); -#else -#error "Need a dummy address for delay" -#endif + __raw_readw(generic_io_base); } #define __BUILD_MEMORY_STRING(bwlq, type) \ @@ -186,8 +182,6 @@ __BUILD_MEMORY_STRING(q, u64) #define IO_SPACE_LIMIT 0xffffffff -extern unsigned long generic_io_base; - /* * This function provides a method for the generic case where a * board-specific ioport_map simply needs to return the port + some @@ -239,14 +233,20 @@ unsigned long long poke_real_address_q(unsigned long long addr, * doesn't exist, so everything must go through 
page tables. */ #ifdef CONFIG_MMU -void __iomem *__ioremap(unsigned long offset, unsigned long size, - unsigned long flags); +void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, + unsigned long flags, void *caller); void __iounmap(void __iomem *addr); static inline void __iomem * +__ioremap(unsigned long offset, unsigned long size, unsigned long flags) +{ + return __ioremap_caller(offset, size, flags, __builtin_return_address(0)); +} + +static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) unsigned long last_addr = offset + size - 1; #endif void __iomem *ret; @@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) if (ret) return ret; -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. @@ -277,6 +277,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return __ioremap(offset, size, flags); } #else +#define __ioremap(offset, size, flags) ((void __iomem *)(offset)) #define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset)) #define __iounmap(addr) do { } while (0) #endif /* CONFIG_MMU */ diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h index 46e71da5be6b..a741153b41c2 100644 --- a/arch/sh/include/asm/irqflags.h +++ b/arch/sh/include/asm/irqflags.h @@ -1,34 +1,9 @@ #ifndef __ASM_SH_IRQFLAGS_H #define __ASM_SH_IRQFLAGS_H -#ifdef CONFIG_SUPERH32 -#include "irqflags_32.h" -#else -#include "irqflags_64.h" -#endif +#define RAW_IRQ_DISABLED 0xf0 +#define RAW_IRQ_ENABLED 0x00 -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return (flags != 0); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - if ((flags & 0xf0) != 0xf0) - raw_local_irq_enable(); -} +#include <asm-generic/irqflags.h> #endif /* __ASM_SH_IRQFLAGS_H */ diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h deleted file mode 100644 index 60218f541340..000000000000 --- a/arch/sh/include/asm/irqflags_32.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_32_H -#define __ASM_SH_IRQFLAGS_32_H - -static inline void raw_local_irq_enable(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %1, %0\n\t" -#ifdef CONFIG_CPU_HAS_SR_RB - "stc r6_bank, %1\n\t" - "or %1, %0\n\t" -#endif - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x000000f0) - : "memory" - ); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); -} - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" 
- "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags, __dummy; - - __asm__ __volatile__ ( - "stc sr, %1\n\t" - "mov %1, %0\n\t" - "or #0xf0, %0\n\t" - "ldc %0, sr\n\t" - "mov %1, %0\n\t" - "and #0xf0, %0\n\t" - : "=&z" (flags), "=&r" (__dummy) - : /* no inputs */ - : "memory" - ); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_32_H */ diff --git a/arch/sh/include/asm/irqflags_64.h b/arch/sh/include/asm/irqflags_64.h deleted file mode 100644 index 88f65222c1d4..000000000000 --- a/arch/sh/include/asm/irqflags_64.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __ASM_SH_IRQFLAGS_64_H -#define __ASM_SH_IRQFLAGS_64_H - -#include <cpu/registers.h> - -#define SR_MASK_LL 0x00000000000000f0LL -#define SR_BL_LL 0x0000000010000000LL - -static inline void raw_local_irq_enable(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void raw_local_irq_disable(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline void set_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); - -} - -static inline void clear_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long long __dummy = SR_MASK_LL; - unsigned long flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %0\n\t" - "and %0, %1, %0" - : "=&r" (flags) - : "r" (__dummy)); - - return flags; -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long long __dummy0, __dummy1 = SR_MASK_LL; - unsigned long flags; - - __asm__ __volatile__ ( - "getcon " __SR ", %1\n\t" - "or %1, r63, %0\n\t" - "or %1, %2, %1\n\t" - "putcon %1, " __SR "\n\t" - "and %0, %2, %0" - : "=&r" (flags), "=&r" (__dummy0) - : "r" (__dummy1)); - - return flags; -} - -#endif /* __ASM_SH_IRQFLAGS_64_H */ diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index 84dd37761f56..9c30955630ff 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/time.h> -#include <asm/machtypes.h> +#include <generated/machtypes.h> struct sh_machine_vector { void (*mv_setup)(char **cmdline_p); diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index f5963037c9d6..c7426ad9926e 100644 --- a/arch/sh/include/asm/mmu.h +++ 
b/arch/sh/include/asm/mmu.h @@ -7,12 +7,16 @@ #define PMB_PASCR 0xff000070 #define PMB_IRMCR 0xff000078 +#define PASCR_SE 0x80000000 + #define PMB_ADDR 0xf6100000 #define PMB_DATA 0xf7100000 #define PMB_ENTRY_MAX 16 #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 +#define PMB_PFN_MASK 0xff000000 + #define PMB_SZ_16M 0x00000000 #define PMB_SZ_64M 0x00000010 #define PMB_SZ_128M 0x00000080 @@ -62,17 +66,10 @@ struct pmb_entry { }; /* arch/sh/mm/pmb.c */ -int __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int *entry); -int set_pmb_entry(struct pmb_entry *pmbe); -void clear_pmb_entry(struct pmb_entry *pmbe); -struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags); -void pmb_free(struct pmb_entry *pmbe); long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); +int pmb_init(void); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ - diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 4163950cd1c6..67f3999b544e 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -3,8 +3,6 @@ #ifdef __KERNEL__ -#include <linux/dma-mapping.h> - /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ @@ -54,30 +52,18 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) * address space. The networking and block device layers use * this boolean for bounce buffer decisions. */ -#define PCI_DMA_BUS_IS_PHYS (1) - -#include <linux/types.h> -#include <linux/slab.h> -#include <asm/scatterlist.h> -#include <linux/string.h> -#include <asm/io.h> +#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) /* pci_unmap_{single,page} being a nop depends upon the * configuration. */ -#ifdef CONFIG_SH_PCIDMA_NONCOHERENT -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) +#ifdef CONFIG_DMA_NONCOHERENT +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) #else #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 11a302297ab7..3d0c9f36d150 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h @@ -1,8 +1,35 @@ #ifndef __ASM_SH_PERF_EVENT_H #define __ASM_SH_PERF_EVENT_H -/* SH only supports software events through this interface. 
*/ -static inline void set_perf_event_pending(void) {} +struct hw_perf_event; + +#define MAX_HWEVENTS 2 + +struct sh_pmu { + const char *name; + unsigned int num_events; + void (*disable_all)(void); + void (*enable_all)(void); + void (*enable)(struct hw_perf_event *, int); + void (*disable)(struct hw_perf_event *, int); + u64 (*read)(int); + int (*event_map)(int); + unsigned int max_events; + unsigned long raw_event_mask; + const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +/* arch/sh/kernel/perf_event.c */ +extern int register_sh_pmu(struct sh_pmu *); +extern int reserve_pmc_hardware(void); +extern void release_pmc_hardware(void); + +static inline void set_perf_event_pending(void) +{ + /* Nothing to see here, move along. */ +} #define PERF_EVENT_INDEX_OFFSET 0 diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 4f3efa7d5a64..ba3046e4f06f 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -75,13 +75,31 @@ static inline unsigned long long neff_sign_extend(unsigned long val) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 -#ifdef CONFIG_32BIT -#define PHYS_ADDR_MASK 0xffffffff +#define PHYS_ADDR_MASK29 0x1fffffff +#define PHYS_ADDR_MASK32 0xffffffff + +#ifdef CONFIG_PMB +static inline unsigned long phys_addr_mask(void) +{ + /* Is the MMU in 29bit mode? */ + if (__in_29bit_mode()) + return PHYS_ADDR_MASK29; + + return PHYS_ADDR_MASK32; +} +#elif defined(CONFIG_32BIT) +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK32; +} #else -#define PHYS_ADDR_MASK 0x1fffffff +static inline unsigned long phys_addr_mask(void) +{ + return PHYS_ADDR_MASK29; +} #endif -#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) +#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) #ifdef CONFIG_SUPERH32 diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index c0d359ce337b..5003ee86f67b 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) #endif -#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) +#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS)) /* Hardware flags, page size encoding */ #if !defined(CONFIG_MMU) @@ -344,7 +344,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) #ifdef CONFIG_X2TLB -#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) +#define pte_write(pte) \ + ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)) #else #define pte_write(pte) ((pte).pte_low & _PAGE_RW) #endif @@ -358,7 +359,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } * individually toggled (and user permissions are entirely decoupled from * kernel permissions), we attempt to couple them a bit more sanely here. 
*/ -PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); +PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)); PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); #else diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 9a8714945dc9..1f3d6fab660c 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -56,6 +56,7 @@ asmlinkage void __init sh_cpu_init(void); #define SR_DSP 0x00001000 #define SR_IMASK 0x000000f0 #define SR_FD 0x00008000 +#define SR_MD 0x40000000 /* * DSP structure and data @@ -136,7 +137,7 @@ struct mm_struct; extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) +void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h index 1987f3ea7f1b..06e2251a5e48 100644 --- a/arch/sh/include/asm/rwsem.h +++ b/arch/sh/include/asm/rwsem.h @@ -41,7 +41,7 @@ struct rw_semaphore { #endif #define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) \ __RWSEM_DEP_MAP_INIT(name) } diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h index 327cc2e4c97b..e38d1d4c7f6f 100644 --- a/arch/sh/include/asm/scatterlist.h +++ b/arch/sh/include/asm/scatterlist.h @@ -1,7 +1,7 @@ #ifndef __ASM_SH_SCATTERLIST_H #define __ASM_SH_SCATTERLIST_H -#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK +#define ISA_DMA_THRESHOLD phys_addr_mask() #include <asm-generic/scatterlist.h> diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h index acf99700deed..f739061e2ee4 100644 --- a/arch/sh/include/asm/sh_eth.h +++ b/arch/sh/include/asm/sh_eth.h @@ -7,6 +7,7 @@ struct sh_eth_plat_data { int phy; int edmac_endian; + unsigned char mac_addr[6]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; }; diff --git a/arch/sh/include/asm/sh_keysc.h b/arch/sh/include/asm/sh_keysc.h deleted file mode 100644 index 4a65b1e40eab..000000000000 --- a/arch/sh/include/asm/sh_keysc.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_KEYSC_H__ -#define __ASM_KEYSC_H__ - -#define SH_KEYSC_MAXKEYS 30 - -struct sh_keysc_info { - enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3 } mode; - int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ - int delay; - int kycr2_delay; - int keycodes[SH_KEYSC_MAXKEYS]; -}; - -#endif /* __ASM_KEYSC_H__ */ diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053fd..bdc0f3b6c56a 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. 
They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! __raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! 
arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60c..9b7560db06ca 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 5c8ea28ff7a4..fe9c2a1ad047 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -2,6 +2,7 @@ #define _ASM_SH_SUSPEND_H #ifndef __ASSEMBLY__ +#include <linux/notifier.h> static inline int arch_prepare_suspend(void) { return 0; } #include <asm/ptrace.h> @@ -19,6 +20,69 @@ void sh_mobile_setup_cpuidle(void); static inline void sh_mobile_setup_cpuidle(void) {} #endif +/* notifier chains for pre/post sleep hooks */ +extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list; +extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; + +/* priority levels for notifiers */ +#define SH_MOBILE_SLEEP_BOARD 0 +#define SH_MOBILE_SLEEP_CPU 1 +#define SH_MOBILE_PRE(x) (x) +#define SH_MOBILE_POST(x) (-(x)) + +/* board code registration function for self-refresh assembly snippets */ +void sh_mobile_register_self_refresh(unsigned long flags, + void *pre_start, void *pre_end, + void *post_start, void *post_end); + +/* register structure for address/data information */ +struct sh_sleep_regs { + unsigned long stbcr; + unsigned long bar; + + /* MMU */ + unsigned long pteh; + unsigned long ptel; + unsigned long ttb; + unsigned long tea; + unsigned long mmucr; + unsigned long ptea; + unsigned long pascr; + unsigned long irmcr; + + /* Cache */ + unsigned long ccr; + unsigned long ramcr; +}; + +/* data area for low-level sleep code */ +struct sh_sleep_data { + /* current sleep mode (SUSP_SH_...) 
*/ + unsigned long mode; + + /* addresses of board specific self-refresh snippets */ + unsigned long sf_pre; + unsigned long sf_post; + + /* address of resume code */ + unsigned long resume; + + /* register state saved and restored by the assembly code */ + unsigned long vbr; + unsigned long spc; + unsigned long sr; + unsigned long sp; + + /* structure for keeping register addresses */ + struct sh_sleep_regs addr; + + /* structure for saving/restoring register state */ + struct sh_sleep_regs data; +}; + +/* a bitmap of supported sleep modes (SUSP_SH..) */ +extern unsigned long sh_mobile_sleep_supported; + #endif /* flags passed to assembly suspend code */ @@ -27,5 +91,6 @@ static inline void sh_mobile_setup_cpuidle(void) {} #define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ #define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ #define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ +#define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */ #endif /* _ASM_SH_SUSPEND_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index b5c5acdc8c0e..c15415b4b169 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -171,10 +171,6 @@ BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); BUILD_TRAP_HANDLER(nmi); -#ifdef CONFIG_BUG -extern void handle_BUG(struct pt_regs *); -#endif - #define arch_align_stack(x) (x) struct mem_access { diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 607d413f6168..06814f5b59c7 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -232,4 +232,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs __regs); +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory" + ); +} + +static inline void clear_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory" + ); +} + #endif /* __ASM_SH_SYSTEM_32_H */ diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 8e4a03e7966c..ab1dd917ea87 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -12,6 +12,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ +#include <cpu/registers.h> #include <asm/processor.h> /* @@ -47,4 +48,29 @@ static inline reg_size_t register_align(void *val) return (unsigned long long)(signed long long)(signed long)val; } +#define SR_BL_LL 0x0000000010000000LL + +static inline void set_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); + +} + +static inline void clear_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + #endif /* __ASM_SH_SYSTEM_64_H */ diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index bdeb9d46d17d..1f3d927e2265 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -19,6 +19,7 @@ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ + __u32 status; /* thread synchronous flags */ __u32 cpu; int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space */ @@ -50,6 +51,7 @@ struct thread_info { .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ + .status = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ @@ -111,13 +113,11 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ #define TIF_SINGLESTEP 4 /* singlestepping active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ -#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 #define TIF_FREEZE 19 /* Freezing for suspend */ @@ -125,13 +125,11 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) @@ -149,13 +147,33 @@ extern void free_thread_info(struct thread_info *ti); /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ - _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) + _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ + _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ _TIF_SYSCALL_AUDIT | 
_TIF_SINGLESTEP)) +/* + * Thread-synchronous status. + * + * This is different from the flags in that nobody else + * ever touches our thread-synchronous status, so we don't + * have to worry about atomic accesses. + */ +#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ +#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ + +#ifndef __ASSEMBLY__ +#define HAVE_SET_RESTORE_SIGMASK 1 +static inline void set_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + ti->status |= TS_RESTORE_SIGMASK; + set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); +} +#endif /* !__ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* __ASM_SH_THREAD_INFO_H */ diff --git a/arch/sh/include/asm/timex.h b/arch/sh/include/asm/timex.h index b556d49e5f2b..18bf06d9c764 100644 --- a/arch/sh/include/asm/timex.h +++ b/arch/sh/include/asm/timex.h @@ -6,7 +6,17 @@ #ifndef __ASM_SH_TIMEX_H #define __ASM_SH_TIMEX_H +/* + * Only parts using the legacy CPG code for their clock framework + * implementation need to define their own Pclk value. If provided, this + * can be used for accurately setting CLOCK_TICK_RATE, otherwise we + * simply fall back on the i8253 PIT value. + */ +#ifdef CONFIG_SH_PCLK_FREQ #define CLOCK_TICK_RATE (CONFIG_SH_PCLK_FREQ / 4) /* Underlying HZ */ +#else +#define CLOCK_TICK_RATE 1193180 +#endif #include <asm-generic/timex.h> diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 65e7bd2f2240..37cdadd975ac 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h @@ -40,6 +40,14 @@ #endif +#define mc_capable() (1) + +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +extern cpumask_t cpu_core_map[NR_CPUS]; + +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) + #include <asm-generic/topology.h> #endif /* _ASM_SH_TOPOLOGY_H */ diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h index 4ca4b7717371..9bf961684431 100644 --- a/arch/sh/include/asm/ubc.h +++ b/arch/sh/include/asm/ubc.h @@ -60,16 +60,5 @@ #define BRCR_UBDE (1 << 0) #endif -#ifndef __ASSEMBLY__ -/* arch/sh/kernel/cpu/ubc.S */ -extern void ubc_sleep(void); - -#ifdef CONFIG_UBC_WAKEUP -extern void ubc_wakeup(void); -#else -#define ubc_wakeup() do { } while (0) -#endif -#endif - #endif /* __KERNEL__ */ #endif /* __ASM_SH_UBC_H */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index f3fd1b9eb6b1..f18c4f9baf27 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -345,8 +345,9 @@ #define __NR_pwritev 334 #define __NR_rt_tgsigqueueinfo 335 #define __NR_perf_event_open 336 +#define __NR_recvmmsg 337 -#define NR_syscalls 337 +#define NR_syscalls 338 #ifdef __KERNEL__ diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index 343ce8f073ea..3e7645d11130 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -385,10 +385,11 @@ #define __NR_pwritev 362 #define __NR_rt_tgsigqueueinfo 363 #define __NR_perf_event_open 364 +#define __NR_recvmmsg 365 #ifdef __KERNEL__ -#define NR_syscalls 365 +#define NR_syscalls 366 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index 2fe7cee9e43a..19dfff5c8511 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h @@ -2,6 +2,8 @@ * include/asm-sh/watchdog.h * * Copyright (C) 2002, 2003 Paul Mundt + * Copyright (C) 2009 Siemens AG + * Copyright 
(C) 2009 Valentin Sitdikov * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -61,6 +63,61 @@ #define WTCSR_CKS_2048 0x06 #define WTCSR_CKS_4096 0x07 +#if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) +/** + * sh_wdt_read_cnt - Read from Counter + * Reads back the WTCNT value. + */ +static inline __u32 sh_wdt_read_cnt(void) +{ + return ctrl_inl(WTCNT_R); +} + +/** + * sh_wdt_write_cnt - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_cnt(__u32 val) +{ + ctrl_outl((WTCNT_HIGH << 24) | (__u32)val, WTCNT); +} + +/** + * sh_wdt_write_bst - Write to Counter + * @val: Value to write + * + * Writes the given value @val to the lower byte of the timer counter. + * The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_bst(__u32 val) +{ + ctrl_outl((WTBST_HIGH << 24) | (__u32)val, WTBST); +} +/** + * sh_wdt_read_csr - Read from Control/Status Register + * + * Reads back the WTCSR value. + */ +static inline __u32 sh_wdt_read_csr(void) +{ + return ctrl_inl(WTCSR_R); +} + +/** + * sh_wdt_write_csr - Write to Control/Status Register + * @val: Value to write + * + * Writes the given value @val to the lower byte of the control/status + * register. The upper byte is set manually on each write. + */ +static inline void sh_wdt_write_csr(__u32 val) +{ + ctrl_outl((WTCSR_HIGH << 24) | (__u32)val, WTCSR); +} +#else /** * sh_wdt_read_cnt - Read from Counter * Reads back the WTCNT value. @@ -103,6 +160,6 @@ static inline void sh_wdt_write_csr(__u8 val) { ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); } - +#endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */ #endif /* __KERNEL__ */ #endif /* __ASM_SH_WATCHDOG_H */ |
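The dma-mapping.h rework in this merge replaces the open-coded SH DMA helpers with dispatch through a struct dma_map_ops table returned by get_dma_ops(). As a rough orientation aid only (not part of the commit), a minimal driver-side sketch of the resulting call path might look as follows; the device pointer, buffer length, and helper names (example_dma_setup / example_dma_teardown) are hypothetical, and the streaming-mapping calls assume the asm-generic/dma-mapping-common.h wrappers pulled in by the new header:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper: grab a coherent buffer plus a streaming mapping.
 * dma_alloc_coherent() now ends up in get_dma_ops(dev)->alloc_coherent,
 * while dma_map_single()/dma_mapping_error() come from the generic
 * dma-mapping-common wrappers included by the reworked header. */
static int example_dma_setup(struct device *dev, void *buf, size_t len,
			     dma_addr_t *coherent_handle, void **coherent_vaddr,
			     dma_addr_t *stream_handle)
{
	*coherent_vaddr = dma_alloc_coherent(dev, len, coherent_handle, GFP_KERNEL);
	if (!*coherent_vaddr)
		return -ENOMEM;

	*stream_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *stream_handle)) {
		dma_free_coherent(dev, len, *coherent_vaddr, *coherent_handle);
		return -EIO;
	}

	return 0;
}

static void example_dma_teardown(struct device *dev, void *coherent_vaddr,
				 size_t len, dma_addr_t coherent_handle,
				 dma_addr_t stream_handle)
{
	dma_unmap_single(dev, stream_handle, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, len, coherent_vaddr, coherent_handle);
}

The point of the ops-table indirection is that the same driver code works whether the platform registers a coherent or a non-coherent implementation behind dma_ops, instead of relying on the compile-time CONFIG_SH_PCIDMA_NONCOHERENT branches removed above.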