58 files changed, 4269 insertions, 787 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index a703b9e9aeb9..d868a11c94a5 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -62,6 +62,21 @@ Description: CPU topology files that describe kernel limits related to See Documentation/cputopology.txt for more information. +What: /sys/devices/system/cpu/probe + /sys/devices/system/cpu/release +Date: November 2009 +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> +Description: Dynamic addition and removal of CPU's. This is not hotplug + removal, this is meant for complete removal/addition of the CPU + from the system. + + probe: writes to this file will dynamically add a CPU to the + system. Information written to the file to add CPU's is + architecture specific. + + release: writes to this file will dynamically remove a CPU from + the system. Information written to the file to remove CPU's + is architecture specific. What: /sys/devices/system/cpu/cpu#/node Date: October 2009 diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt index 8447fd7090d0..ddd5ee32ea63 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt +++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt @@ -103,7 +103,22 @@ fsl,mpc5200-gpt nodes --------------------- On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board design supports the internal wdt, then the device node for GPT0 should -include the empty property 'fsl,has-wdt'. +include the empty property 'fsl,has-wdt'. Note that this does not activate +the watchdog. The timer will function as a GPT if the timer api is used, and +it will function as watchdog if the watchdog device is used. The watchdog +mode has priority over the gpt mode, i.e. if the watchdog is activated, any +gpt api call to this timer will fail with -EBUSY. + +If you add the property + fsl,wdt-on-boot = <n>; +GPT0 will be marked as in-use watchdog, i.e. blocking every gpt access to it. +If n>0, the watchdog is started with a timeout of n seconds. If n=0, the +configuration of the watchdog is not touched. This is useful in two cases: +- just mark GPT0 as watchdog, blocking gpt accesses, and configure it later; +- do not touch a configuration assigned by the boot loader which supervises + the boot process itself. + +The watchdog will respect the CONFIG_WATCHDOG_NOWAYOUT option. An mpc5200-gpt can be used as a single line GPIO controller. To do so, add the following properties to the gpt node: diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5dbd375a3f2a..0df57466e783 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -320,6 +320,10 @@ config HOTPLUG_CPU Say N if you are unsure. 
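The ABI entry above deliberately leaves the probe/release payload architecture specific. Purely as an illustration of how userspace might exercise the new files on a platform that wires up the corresponding hooks, here is a minimal sketch; the argument string "0x123" is hypothetical, not a documented format.

/* Sketch only: write an architecture-specific argument to the new
 * probe file. The "0x123" value is illustrative; consult the platform
 * documentation for the real format. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cpu_ctl(const char *file, const char *arg)
{
	char path[64];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, arg, strlen(arg));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	return cpu_ctl("probe", "0x123");	/* hypothetical argument */
}

On a platform that leaves the hooks unset the write simply fails, as the generic wrappers added later in this series return -EINVAL in that case.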
+config ARCH_CPU_PROBE_RELEASE + def_bool y + depends on HOTPLUG_CPU + config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index c9ca97f43bc1..81f3b0b5601e 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -47,7 +47,23 @@ struct kvm_regs { struct kvm_sregs { __u32 pvr; - char pad[1020]; + union { + struct { + __u64 sdr1; + struct { + struct { + __u64 slbe; + __u64 slbv; + } slb[64]; + } ppc64; + struct { + __u32 sr[16]; + __u64 ibat[8]; + __u64 dbat[8]; + } ppc32; + } s; + __u8 pad[1020]; + } u; }; struct kvm_fpu { diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 19ddb352fd0f..af2abe74f544 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -87,6 +87,7 @@ #define BOOK3S_IRQPRIO_MAX 16 #define BOOK3S_HFLAG_DCBZ32 0x1 +#define BOOK3S_HFLAG_SLB 0x2 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index c6011336371e..74b7369770d0 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -46,6 +46,7 @@ struct kvmppc_sr { }; struct kvmppc_bat { + u64 raw; u32 bepi; u32 bepi_mask; bool vs; @@ -113,6 +114,8 @@ extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, boo extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); +extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, + bool upper, u32 val); extern u32 kvmppc_trampoline_lowmem; extern u32 kvmppc_trampoline_enter; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 9efa2be78331..9f0fc9e6ce0d 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -266,6 +266,11 @@ struct machdep_calls { void (*suspend_disable_irqs)(void); void (*suspend_enable_irqs)(void); #endif + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + ssize_t (*cpu_probe)(const char *, size_t); + ssize_t (*cpu_release)(const char *, size_t); +#endif }; extern void e500_idle(void); diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h index 079c06eae446..a062c57696d0 100644 --- a/arch/powerpc/include/asm/macio.h +++ b/arch/powerpc/include/asm/macio.h @@ -39,6 +39,7 @@ struct macio_dev struct macio_bus *bus; /* macio bus this device is on */ struct macio_dev *media_bay; /* Device is part of a media bay */ struct of_device ofdev; + struct device_dma_parameters dma_parms; /* ide needs that */ int n_resources; struct resource resource[MACIO_DEV_COUNT_RESOURCES]; int n_interrupts; @@ -78,6 +79,8 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour return res->end - res->start + 1; } +extern int macio_enable_devres(struct macio_dev *dev); + extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name); extern void macio_release_resource(struct macio_dev *dev, int resource_no); extern int macio_request_resources(struct macio_dev *dev, const char *name); @@ -131,6 +134,9 @@ struct macio_driver int (*resume)(struct macio_dev* dev); int (*shutdown)(struct macio_dev* dev); +#ifdef CONFIG_PMAC_MEDIABAY + void 
(*mediabay_event)(struct macio_dev* dev, int mb_state); +#endif struct device_driver driver; }; #define to_macio_driver(drv) container_of(drv,struct macio_driver, driver) diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h index b2efb3325808..11037a4133ee 100644 --- a/arch/powerpc/include/asm/mediabay.h +++ b/arch/powerpc/include/asm/mediabay.h @@ -17,26 +17,31 @@ #define MB_POWER 6 /* media bay contains a Power device (???) */ #define MB_NO 7 /* media bay contains nothing */ -/* Number of bays in the machine or 0 */ -extern int media_bay_count; +struct macio_dev; -#ifdef CONFIG_BLK_DEV_IDE_PMAC -#include <linux/ide.h> +#ifdef CONFIG_PMAC_MEDIABAY -int check_media_bay_by_base(unsigned long base, int what); -/* called by IDE PMAC host driver to register IDE controller for media bay */ -int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base, - int irq, ide_hwif_t *hwif); +/* Check the content type of the bay, returns MB_NO if the bay is still + * transitionning + */ +extern int check_media_bay(struct macio_dev *bay); -int check_media_bay(struct device_node *which_bay, int what); +/* The ATA driver uses the calls below to temporarily hold on the + * media bay callbacks while initializing the interface + */ +extern void lock_media_bay(struct macio_dev *bay); +extern void unlock_media_bay(struct macio_dev *bay); #else -static inline int check_media_bay(struct device_node *which_bay, int what) +static inline int check_media_bay(struct macio_dev *bay) { - return -ENODEV; + return MB_NO; } +static inline void lock_media_bay(struct macio_dev *bay) { } +static inline void unlock_media_bay(struct macio_dev *bay) { } + #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 7514ec2f8540..2102b214a87c 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -373,6 +373,38 @@ extern void slb_set_size(u16 size); #ifndef __ASSEMBLY__ +#ifdef CONFIG_PPC_SUBPAGE_PROT +/* + * For the sub-page protection option, we extend the PGD with one of + * these. Basically we have a 3-level tree, with the top level being + * the protptrs array. To optimize speed and memory consumption when + * only addresses < 4GB are being protected, pointers to the first + * four pages of sub-page protection words are stored in the low_prot + * array. + * Each page of sub-page protection words protects 1GB (4 bytes + * protects 64k). For the 3-level tree, each page of pointers then + * protects 8TB. 
+ */ +struct subpage_prot_table { + unsigned long maxaddr; /* only addresses < this are protected */ + unsigned int **protptrs[2]; + unsigned int *low_prot[4]; +}; + +#define SBP_L1_BITS (PAGE_SHIFT - 2) +#define SBP_L2_BITS (PAGE_SHIFT - 3) +#define SBP_L1_COUNT (1 << SBP_L1_BITS) +#define SBP_L2_COUNT (1 << SBP_L2_BITS) +#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) +#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) + +extern void subpage_prot_free(struct mm_struct *mm); +extern void subpage_prot_init_new_context(struct mm_struct *mm); +#else +static inline void subpage_prot_free(struct mm_struct *mm) {} +static inline void subpage_prot_init_new_context(struct mm_struct *mm) { } +#endif /* CONFIG_PPC_SUBPAGE_PROT */ + typedef unsigned long mm_context_id_t; typedef struct { @@ -386,6 +418,9 @@ typedef struct { u16 sllp; /* SLB page size encoding */ #endif unsigned long vdso_base; +#ifdef CONFIG_PPC_SUBPAGE_PROT + struct subpage_prot_table spt; +#endif /* CONFIG_PPC_SUBPAGE_PROT */ } mm_context_t; diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 1b4f697abbdd..b664ce79a172 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv); extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node); extern void mpc52xx_restart(char *cmd); +/* mpc52xx_gpt.c */ +struct mpc52xx_gpt_priv; +extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq); +extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period, + int continuous); +extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt); +extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt); + +/* mpc52xx_lpbfifo.c */ +#define MPC52XX_LPBFIFO_FLAG_READ (0) +#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0) +#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1) +#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2) +#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3) + +struct mpc52xx_lpbfifo_request { + struct list_head list; + + /* localplus bus address */ + unsigned int cs; + size_t offset; + + /* Memory address */ + void *data; + phys_addr_t data_phys; + + /* Details of transfer */ + size_t size; + size_t pos; /* current position of transfer */ + int flags; + + /* What to do when finished */ + void (*callback)(struct mpc52xx_lpbfifo_request *); + + void *priv; /* Driver private data */ + + /* statistics */ + int irq_count; + int irq_ticks; + u8 last_byte; + int buffer_not_done_cnt; +}; + +extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req); +extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req); +extern void mpc52xx_lpbfifo_poll(void); + /* mpc52xx_pic.c */ extern void mpc52xx_init_irq(void); extern unsigned int mpc52xx_get_irq(void); diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h index e482e5352e69..d4b4bfa26fb3 100644 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/arch/powerpc/include/asm/pSeries_reconfig.h @@ -17,6 +17,7 @@ #ifdef CONFIG_PPC_PSERIES extern int pSeries_reconfig_notifier_register(struct notifier_block *); extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); +extern struct blocking_notifier_head pSeries_reconfig_chain; #else /* !CONFIG_PPC_PSERIES */ static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) { diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 
5c1cd73dafa8..605f5c5398d1 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -28,10 +28,6 @@ */ #define MAX_PGTABLE_INDEX_SIZE 0xf -#ifndef CONFIG_PPC_SUBPAGE_PROT -static inline void subpage_prot_free(pgd_t *pgd) {} -#endif - extern struct kmem_cache *pgtable_cache[]; #define PGT_CACHE(shift) (pgtable_cache[(shift)-1]) @@ -42,7 +38,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - subpage_prot_free(pgd); kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); } diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 82b72207c51c..c4490f9c67c4 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -76,41 +76,4 @@ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) - -#ifdef CONFIG_PPC_SUBPAGE_PROT -/* - * For the sub-page protection option, we extend the PGD with one of - * these. Basically we have a 3-level tree, with the top level being - * the protptrs array. To optimize speed and memory consumption when - * only addresses < 4GB are being protected, pointers to the first - * four pages of sub-page protection words are stored in the low_prot - * array. - * Each page of sub-page protection words protects 1GB (4 bytes - * protects 64k). For the 3-level tree, each page of pointers then - * protects 8TB. - */ -struct subpage_prot_table { - unsigned long maxaddr; /* only addresses < this are protected */ - unsigned int **protptrs[2]; - unsigned int *low_prot[4]; -}; - -#undef PGD_TABLE_SIZE -#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \ - sizeof(struct subpage_prot_table)) - -#define SBP_L1_BITS (PAGE_SHIFT - 2) -#define SBP_L2_BITS (PAGE_SHIFT - 3) -#define SBP_L1_COUNT (1 << SBP_L1_BITS) -#define SBP_L2_COUNT (1 << SBP_L2_BITS) -#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS) -#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS) - -extern void subpage_prot_free(pgd_t *pgd); - -static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd) -{ - return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD); -} -#endif /* CONFIG_PPC_SUBPAGE_PROT */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c index 1882bf419fa6..8dc7547c2377 100644 --- a/arch/powerpc/kernel/io.c +++ b/arch/powerpc/kernel/io.c @@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src, dest++; n--; } - while(n > 4) { + while(n >= 4) { *((u32 *)dest) = *((volatile u32 *)vsrc); eieio(); vsrc += 4; @@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) vdest++; n--; } - while(n > 4) { + while(n >= 4) { *((volatile u32 *)vdest) = *((volatile u32 *)src); src += 4; vdest += 4; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 9b86a74d2815..97196eefef3e 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) static void stop_this_cpu(void *dummy) { + /* Remove this CPU */ + set_cpu_online(smp_processor_id(), false); + local_irq_disable(); while (1) ; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 956ab33fd73f..e235e52dc4fe 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu) cacheinfo_cpu_offline(cpu); } + 
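The machdep_calls hooks shown earlier (cpu_probe/cpu_release) are what the sysfs wrappers added just below forward to, falling back to -EINVAL when a platform leaves them unset. The following is a hedged sketch of how a platform might populate those hooks; the function names are invented and the argument parsing is omitted.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/machdep.h>

/* Hypothetical platform hooks; a real implementation would parse the
 * architecture-specific argument in buf and add/remove the CPU. */
static ssize_t example_cpu_probe(const char *buf, size_t count)
{
	return count;		/* success: consume the whole write */
}

static ssize_t example_cpu_release(const char *buf, size_t count)
{
	return count;
}

static void __init example_setup_cpu_hotplug(void)
{
	ppc_md.cpu_probe = example_cpu_probe;
	ppc_md.cpu_release = example_cpu_release;
}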
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +ssize_t arch_cpu_probe(const char *buf, size_t count) +{ + if (ppc_md.cpu_probe) + return ppc_md.cpu_probe(buf, count); + + return -EINVAL; +} + +ssize_t arch_cpu_release(const char *buf, size_t count) +{ + if (ppc_md.cpu_release) + return ppc_md.cpu_release(buf, count); + + return -EINVAL; +} +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ + #endif /* CONFIG_HOTPLUG_CPU */ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 42037d46a416..3e294bd9b8c6 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) { + vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; vcpu->arch.pvr = pvr; if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); @@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int i; + sregs->pvr = vcpu->arch.pvr; + + sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; + if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { + for (i = 0; i < 64; i++) { + sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i; + sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; + } + } else { + for (i = 0; i < 16; i++) { + sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; + sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; + } + for (i = 0; i < 8; i++) { + sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; + sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; + } + } return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int i; + kvmppc_set_pvr(vcpu, sregs->pvr); + + vcpu3s->sdr1 = sregs->u.s.sdr1; + if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { + for (i = 0; i < 64; i++) { + vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, + sregs->u.s.ppc64.slb[i].slbe); + } + } else { + for (i = 0; i < 16; i++) { + vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); + } + for (i = 0; i < 8; i++) { + kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, + (u32)sregs->u.s.ppc32.ibat[i]); + kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, + (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); + kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, + (u32)sregs->u.s.ppc32.dbat[i]); + kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, + (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); + } + } + + /* Flush the MMU after messing with the segments */ + kvmppc_mmu_pte_flush(vcpu, 0, 0); return 0; } diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c index c343e67306e0..1027eac6d474 100644 --- a/arch/powerpc/kvm/book3s_64_emulate.c +++ b/arch/powerpc/kvm/book3s_64_emulate.c @@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) +void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, + u32 val) +{ + if (upper) { + /* Upper BAT */ + u32 bl = (val >> 2) & 0x7ff; + bat->bepi_mask = (~bl << 17); + bat->bepi = val & 0xfffe0000; + bat->vs = (val & 2) ? 1 : 0; + bat->vp = (val & 1) ? 
1 : 0; + bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; + } else { + /* Lower BAT */ + bat->brpn = val & 0xfffe0000; + bat->wimg = (val >> 3) & 0xf; + bat->pp = val & 3; + bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); + } +} + +static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; @@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val) BUG(); } - if (!(sprn % 2)) { - /* Upper BAT */ - u32 bl = (val >> 2) & 0x7ff; - bat->bepi_mask = (~bl << 17); - bat->bepi = val & 0xfffe0000; - bat->vs = (val & 2) ? 1 : 0; - bat->vp = (val & 1) ? 1 : 0; - } else { - /* Lower BAT */ - bat->brpn = val & 0xfffe0000; - bat->wimg = (val >> 3) & 0xf; - bat->pp = val & 3; - } + kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); } int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) @@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L: - kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]); + kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]); /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index a31f9c677d23..5598f88f142e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; + + vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 692c3709011e..d82551efbfbf 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { + case KVM_CAP_PPC_SEGSTATE: + r = 1; + break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 6810128aba30..50f867d657df 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -835,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) * Result is 0: full permissions, _PAGE_RW: read-only, * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. 
*/ -static int subpage_protection(pgd_t *pgdir, unsigned long ea) +static int subpage_protection(struct mm_struct *mm, unsigned long ea) { - struct subpage_prot_table *spt = pgd_subpage_prot(pgdir); + struct subpage_prot_table *spt = &mm->context.spt; u32 spp = 0; u32 **sbpm, *sbpp; @@ -865,7 +865,7 @@ static int subpage_protection(pgd_t *pgdir, unsigned long ea) } #else /* CONFIG_PPC_SUBPAGE_PROT */ -static inline int subpage_protection(pgd_t *pgdir, unsigned long ea) +static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) { return 0; } diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index b9e4cc2c2057..b910d37aea1a 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -76,6 +76,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ if (slice_mm_new_context(mm)) slice_set_user_psize(mm, mmu_virtual_psize); + subpage_prot_init_new_context(mm); mm->context.id = index; return 0; @@ -92,5 +93,6 @@ EXPORT_SYMBOL_GPL(__destroy_context); void destroy_context(struct mm_struct *mm) { __destroy_context(mm->context.id); + subpage_prot_free(mm); mm->context.id = NO_CONTEXT; } diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index 4cafc0c33d0a..a040b81e93bd 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -24,9 +24,9 @@ * Also makes sure that the subpage_prot_table structure is * reinitialized for the next user. */ -void subpage_prot_free(pgd_t *pgd) +void subpage_prot_free(struct mm_struct *mm) { - struct subpage_prot_table *spt = pgd_subpage_prot(pgd); + struct subpage_prot_table *spt = &mm->context.spt; unsigned long i, j, addr; u32 **p; @@ -51,6 +51,13 @@ void subpage_prot_free(pgd_t *pgd) spt->maxaddr = 0; } +void subpage_prot_init_new_context(struct mm_struct *mm) +{ + struct subpage_prot_table *spt = &mm->context.spt; + + memset(spt, 0, sizeof(*spt)); +} + static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, int npages) { @@ -87,7 +94,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, static void subpage_prot_clear(unsigned long addr, unsigned long len) { struct mm_struct *mm = current->mm; - struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); + struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; int i, nw; unsigned long next, limit; @@ -136,7 +143,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) { struct mm_struct *mm = current->mm; - struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); + struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; int i, nw; unsigned long next, limit; diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 8b8e9560a315..47ea1be1481b 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO select GENERIC_GPIO help Enable gpiolib support for mpc5200 based boards + +config PPC_MPC5200_LPBFIFO + tristate "MPC5200 LocalPlus bus FIFO driver" + depends on PPC_MPC52xx + select PPC_BESTCOMM_GEN_BD diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile index bfd4f52cf3dd..2bc8cd0c5cfc 100644 --- a/arch/powerpc/platforms/52xx/Makefile +++ b/arch/powerpc/platforms/52xx/Makefile @@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y) endif 
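The mpc52xx.h hunk earlier declares the LocalPlus bus FIFO request structure and the submit/abort/poll entry points; the driver itself (mpc52xx_lpbfifo.c) is added further below. As a hedged sketch of a client, here is what a simple PIO write through that API could look like; the chip select, offset and synchronisation scheme are illustrative only.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <asm/mpc52xx.h>

static DECLARE_COMPLETION(example_lpb_done);

static void example_lpb_callback(struct mpc52xx_lpbfifo_request *req)
{
	complete(&example_lpb_done);
}

static int example_lpb_write(void *buf, size_t len)
{
	struct mpc52xx_lpbfifo_request req = {
		.cs = 0,		/* illustrative LocalPlus chip select */
		.offset = 0,		/* offset within that chip select */
		.data = buf,
		.size = len,
		.flags = MPC52XX_LPBFIFO_FLAG_WRITE | MPC52XX_LPBFIFO_FLAG_NO_DMA,
		.callback = example_lpb_callback,
	};
	int rc;

	rc = mpc52xx_lpbfifo_submit(&req);
	if (rc)
		return rc;
	wait_for_completion(&example_lpb_done);
	return 0;
}

A DMA transfer would additionally need data_phys filled in and would not set MPC52XX_LPBFIFO_FLAG_NO_DMA, since the driver's kick routine programs the Bestcomm buffer descriptor from that physical address.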
obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o +obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 4d76b7f2336c..17ecdf4c87ae 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -16,8 +16,14 @@ * output signals or measure input signals. * * This driver supports the GPIO and IRQ controller functions of the GPT - * device. Timer functions are not yet supported, nor is the watchdog - * timer. + * device. Timer functions are not yet supported. + * + * The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used, + * this prevents the use of any gpt0 gpt function (i.e. they will fail with + * -EBUSY). Thus, the safety wdt function always has precedence over the gpt + * function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT, + * this means that gpt0 is locked in wdt mode until the next reboot - this + * may be a requirement in safety applications. * * To use the GPIO function, the following two properties must be added * to the device tree node for the gpt device (typically in the .dts file @@ -46,17 +52,24 @@ * the output mode. This driver does not change the output mode setting. */ +#include <linux/device.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/list.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/kernel.h> +#include <linux/watchdog.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <asm/div64.h> #include <asm/mpc52xx.h> MODULE_DESCRIPTION("Freescale MPC52xx gpt driver"); -MODULE_AUTHOR("Sascha Hauer, Grant Likely"); +MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht DreĂź"); MODULE_LICENSE("GPL"); /** @@ -66,18 +79,27 @@ MODULE_LICENSE("GPL"); * @lock: spinlock to coordinate between different functions. 
* @of_gc: of_gpio_chip instance structure; used when GPIO is enabled * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported + * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates + * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates + * if the timer is actively used as wdt which blocks gpt functions */ struct mpc52xx_gpt_priv { + struct list_head list; /* List of all GPT devices */ struct device *dev; struct mpc52xx_gpt __iomem *regs; spinlock_t lock; struct irq_host *irqhost; + u32 ipb_freq; + u8 wdt_mode; #if defined(CONFIG_GPIOLIB) struct of_gpio_chip of_gc; #endif }; +LIST_HEAD(mpc52xx_gpt_list); +DEFINE_MUTEX(mpc52xx_gpt_list_mutex); + #define MPC52xx_GPT_MODE_MS_MASK (0x07) #define MPC52xx_GPT_MODE_MS_IC (0x01) #define MPC52xx_GPT_MODE_MS_OC (0x02) @@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv { #define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20) #define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30) +#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000) +#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400) +#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200) #define MPC52xx_GPT_MODE_IRQ_EN (0x0100) +#define MPC52xx_GPT_MODE_WDT_EN (0x8000) #define MPC52xx_GPT_MODE_ICT_MASK (0x030000) #define MPC52xx_GPT_MODE_ICT_RISING (0x010000) #define MPC52xx_GPT_MODE_ICT_FALLING (0x020000) #define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000) +#define MPC52xx_GPT_MODE_WDT_PING (0xa5) + #define MPC52xx_GPT_STATUS_IRQMASK (0x000f) +#define MPC52xx_GPT_CAN_WDT (1 << 0) +#define MPC52xx_GPT_IS_WDT (1 << 1) + + /* --------------------------------------------------------------------- * Cascaded interrupt controller hooks */ @@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]); - if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) { + if ((intsize < 1) || (intspec[0] > 3)) { dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name); return -EINVAL; } @@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) { int cascade_virq; unsigned long flags; - - /* Only setup cascaded IRQ if device tree claims the GPT is - * an interrupt controller */ - if (!of_find_property(node, "interrupt-controller", NULL)) - return; + u32 mode; cascade_virq = irq_of_parse_and_map(node, 0); + if (!cascade_virq) + return; gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, &mpc52xx_gpt_irq_ops, -1); @@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) } gpt->irqhost->host_data = gpt; - set_irq_data(cascade_virq, gpt); set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); - /* Set to Input Capture mode */ + /* If the GPT is currently disabled, then change it to be in Input + * Capture mode. If the mode is non-zero, then the pin could be + * already in use for something. */ spin_lock_irqsave(&gpt->lock, flags); - clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK, - MPC52xx_GPT_MODE_MS_IC); + mode = in_be32(&gpt->regs->mode); + if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0) + out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC); spin_unlock_irqrestore(&gpt->lock, flags); dev_dbg(gpt->dev, "%s() complete. 
virq=%i\n", __func__, cascade_virq); @@ -335,6 +367,354 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { } #endif /* defined(CONFIG_GPIOLIB) */ +/*********************************************************************** + * Timer API + */ + +/** + * mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number + * @irq: irq of timer. + */ +struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq) +{ + struct mpc52xx_gpt_priv *gpt; + struct list_head *pos; + + /* Iterate over the list of timers looking for a matching device */ + mutex_lock(&mpc52xx_gpt_list_mutex); + list_for_each(pos, &mpc52xx_gpt_list) { + gpt = container_of(pos, struct mpc52xx_gpt_priv, list); + if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) { + mutex_unlock(&mpc52xx_gpt_list_mutex); + return gpt; + } + } + mutex_unlock(&mpc52xx_gpt_list_mutex); + + return NULL; +} +EXPORT_SYMBOL(mpc52xx_gpt_from_irq); + +static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period, + int continuous, int as_wdt) +{ + u32 clear, set; + u64 clocks; + u32 prescale; + unsigned long flags; + + clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS; + set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE; + if (as_wdt) { + clear |= MPC52xx_GPT_MODE_IRQ_EN; + set |= MPC52xx_GPT_MODE_WDT_EN; + } else if (continuous) + set |= MPC52xx_GPT_MODE_CONTINUOUS; + + /* Determine the number of clocks in the requested period. 64 bit + * arithmatic is done here to preserve the precision until the value + * is scaled back down into the u32 range. Period is in 'ns', bus + * frequency is in Hz. */ + clocks = period * (u64)gpt->ipb_freq; + do_div(clocks, 1000000000); /* Scale it down to ns range */ + + /* This device cannot handle a clock count greater than 32 bits */ + if (clocks > 0xffffffff) + return -EINVAL; + + /* Calculate the prescaler and count values from the clocks value. + * 'clocks' is the number of clock ticks in the period. The timer + * has 16 bit precision and a 16 bit prescaler. Prescaler is + * calculated by integer dividing the clocks by 0x10000 (shifting + * down 16 bits) to obtain the smallest possible divisor for clocks + * to get a 16 bit count value. + * + * Note: the prescale register is '1' based, not '0' based. ie. a + * value of '1' means divide the clock by one. 0xffff divides the + * clock by 0xffff. '0x0000' does not divide by zero, but wraps + * around and divides by 0x10000. That is why prescale must be + * a u32 variable, not a u16, for this calculation. */ + prescale = (clocks >> 16) + 1; + do_div(clocks, prescale); + if (clocks > 0xffff) { + pr_err("calculation error; prescale:%x clocks:%llx\n", + prescale, clocks); + return -EINVAL; + } + + /* Set and enable the timer, reject an attempt to use a wdt as gpt */ + spin_lock_irqsave(&gpt->lock, flags); + if (as_wdt) + gpt->wdt_mode |= MPC52xx_GPT_IS_WDT; + else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) { + spin_unlock_irqrestore(&gpt->lock, flags); + return -EBUSY; + } + out_be32(&gpt->regs->count, prescale << 16 | clocks); + clrsetbits_be32(&gpt->regs->mode, clear, set); + spin_unlock_irqrestore(&gpt->lock, flags); + + return 0; +} + +/** + * mpc52xx_gpt_start_timer - Set and enable the GPT timer + * @gpt: Pointer to gpt private data structure + * @period: period of timer in ns; max. 
~130s @ 33MHz IPB clock + * @continuous: set to 1 to make timer continuous free running + * + * An interrupt will be generated every time the timer fires + */ +int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period, + int continuous) +{ + return mpc52xx_gpt_do_start(gpt, period, continuous, 0); +} +EXPORT_SYMBOL(mpc52xx_gpt_start_timer); + +/** + * mpc52xx_gpt_stop_timer - Stop a gpt + * @gpt: Pointer to gpt private data structure + * + * Returns an error if attempting to stop a wdt + */ +int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt) +{ + unsigned long flags; + + /* reject the operation if the timer is used as watchdog (gpt 0 only) */ + spin_lock_irqsave(&gpt->lock, flags); + if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) { + spin_unlock_irqrestore(&gpt->lock, flags); + return -EBUSY; + } + + clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE); + spin_unlock_irqrestore(&gpt->lock, flags); + return 0; +} +EXPORT_SYMBOL(mpc52xx_gpt_stop_timer); + +/** + * mpc52xx_gpt_timer_period - Read the timer period + * @gpt: Pointer to gpt private data structure + * + * Returns the timer period in ns + */ +u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt) +{ + u64 period; + u64 prescale; + unsigned long flags; + + spin_lock_irqsave(&gpt->lock, flags); + period = in_be32(&gpt->regs->count); + spin_unlock_irqrestore(&gpt->lock, flags); + + prescale = period >> 16; + period &= 0xffff; + if (prescale == 0) + prescale = 0x10000; + period = period * prescale * 1000000000ULL; + do_div(period, (u64)gpt->ipb_freq); + return period; +} +EXPORT_SYMBOL(mpc52xx_gpt_timer_period); + +#if defined(CONFIG_MPC5200_WDT) +/*********************************************************************** + * Watchdog API for gpt0 + */ + +#define WDT_IDENTITY "mpc52xx watchdog on GPT0" + +/* wdt_is_active stores wether or not the /dev/watchdog device is opened */ +static unsigned long wdt_is_active; + +/* wdt-capable gpt */ +static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt; + +/* low-level wdt functions */ +static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt) +{ + unsigned long flags; + + spin_lock_irqsave(&gpt_wdt->lock, flags); + out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING); + spin_unlock_irqrestore(&gpt_wdt->lock, flags); +} + +/* wdt misc device api */ +static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; + mpc52xx_gpt_wdt_ping(gpt_wdt); + return 0; +} + +static struct watchdog_info mpc5200_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .identity = WDT_IDENTITY, +}; + +static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; + int __user *data = (int __user *)arg; + int timeout; + u64 real_timeout; + int ret = 0; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user(data, &mpc5200_wdt_info, + sizeof(mpc5200_wdt_info)); + if (ret) + ret = -EFAULT; + break; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + ret = put_user(0, data); + break; + + case WDIOC_KEEPALIVE: + mpc52xx_gpt_wdt_ping(gpt_wdt); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(timeout, data); + if (ret) + break; + real_timeout = (u64) timeout * 1000000000ULL; + ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1); + if (ret) + break; + /* fall through and return the timeout */ + + case WDIOC_GETTIMEOUT: + /* we need to round here as to avoid 
e.g. the following + * situation: + * - timeout requested is 1 second; + * - real timeout @33MHz is 999997090ns + * - the int divide by 10^9 will return 0. + */ + real_timeout = + mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL; + do_div(real_timeout, 1000000000ULL); + timeout = (int) real_timeout; + ret = put_user(timeout, data); + break; + + default: + ret = -ENOTTY; + } + return ret; +} + +static int mpc52xx_wdt_open(struct inode *inode, struct file *file) +{ + int ret; + + /* sanity check */ + if (!mpc52xx_gpt_wdt) + return -ENODEV; + + /* /dev/watchdog can only be opened once */ + if (test_and_set_bit(0, &wdt_is_active)) + return -EBUSY; + + /* Set and activate the watchdog with 30 seconds timeout */ + ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL, + 0, 1); + if (ret) { + clear_bit(0, &wdt_is_active); + return ret; + } + + file->private_data = mpc52xx_gpt_wdt; + return nonseekable_open(inode, file); +} + +static int mpc52xx_wdt_release(struct inode *inode, struct file *file) +{ + /* note: releasing the wdt in NOWAYOUT-mode does not stop it */ +#if !defined(CONFIG_WATCHDOG_NOWAYOUT) + struct mpc52xx_gpt_priv *gpt_wdt = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&gpt_wdt->lock, flags); + clrbits32(&gpt_wdt->regs->mode, + MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN); + gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT; + spin_unlock_irqrestore(&gpt_wdt->lock, flags); +#endif + clear_bit(0, &wdt_is_active); + return 0; +} + + +static const struct file_operations mpc52xx_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = mpc52xx_wdt_write, + .unlocked_ioctl = mpc52xx_wdt_ioctl, + .open = mpc52xx_wdt_open, + .release = mpc52xx_wdt_release, +}; + +static struct miscdevice mpc52xx_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &mpc52xx_wdt_fops, +}; + +static int __devinit mpc52xx_gpt_wdt_init(void) +{ + int err; + + /* try to register the watchdog misc device */ + err = misc_register(&mpc52xx_wdt_miscdev); + if (err) + pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY); + else + pr_info("%s: watchdog device registered\n", WDT_IDENTITY); + return err; +} + +static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, + const u32 *period) +{ + u64 real_timeout; + + /* remember the gpt for the wdt operation */ + mpc52xx_gpt_wdt = gpt; + + /* configure the wdt if the device tree contained a timeout */ + if (!period || *period == 0) + return 0; + + real_timeout = (u64) *period * 1000000000ULL; + if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1)) + dev_warn(gpt->dev, "starting as wdt failed\n"); + else + dev_info(gpt->dev, "watchdog set to %us timeout\n", *period); + return 0; +} + +#else + +static int __devinit mpc52xx_gpt_wdt_init(void) +{ + return 0; +} + +#define mpc52xx_gpt_wdt_setup(x, y) (0) + +#endif /* CONFIG_MPC5200_WDT */ + /* --------------------------------------------------------------------- * of_platform bus binding code */ @@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev, spin_lock_init(&gpt->lock); gpt->dev = &ofdev->dev; + gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node); gpt->regs = of_iomap(ofdev->node, 0); if (!gpt->regs) { kfree(gpt); @@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev, mpc52xx_gpt_gpio_setup(gpt, ofdev->node); mpc52xx_gpt_irq_setup(gpt, ofdev->node); + mutex_lock(&mpc52xx_gpt_list_mutex); + list_add(&gpt->list, &mpc52xx_gpt_list); + mutex_unlock(&mpc52xx_gpt_list_mutex); + + /* 
check if this device could be a watchdog */ + if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) || + of_get_property(ofdev->node, "has-wdt", NULL)) { + const u32 *on_boot_wdt; + + gpt->wdt_mode = MPC52xx_GPT_CAN_WDT; + on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot", + NULL); + if (on_boot_wdt) { + dev_info(gpt->dev, "used as watchdog\n"); + gpt->wdt_mode |= MPC52xx_GPT_IS_WDT; + } else + dev_info(gpt->dev, "can function as watchdog\n"); + mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt); + } + return 0; } @@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void) /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ subsys_initcall(mpc52xx_gpt_init); +device_initcall(mpc52xx_gpt_wdt_init); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c new file mode 100644 index 000000000000..929d017535a3 --- /dev/null +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -0,0 +1,560 @@ +/* + * LocalPlus Bus FIFO driver for the Freescale MPC52xx. + * + * Copyright (C) 2009 Secret Lab Technologies Ltd. + * + * This file is released under the GPLv2 + * + * Todo: + * - Add support for multiple requests to be queued. + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/spinlock.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/mpc52xx.h> +#include <asm/time.h> + +#include <sysdev/bestcomm/bestcomm.h> +#include <sysdev/bestcomm/bestcomm_priv.h> +#include <sysdev/bestcomm/gen_bd.h> + +MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); +MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver"); +MODULE_LICENSE("GPL"); + +#define LPBFIFO_REG_PACKET_SIZE (0x00) +#define LPBFIFO_REG_START_ADDRESS (0x04) +#define LPBFIFO_REG_CONTROL (0x08) +#define LPBFIFO_REG_ENABLE (0x0C) +#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14) +#define LPBFIFO_REG_FIFO_DATA (0x40) +#define LPBFIFO_REG_FIFO_STATUS (0x44) +#define LPBFIFO_REG_FIFO_CONTROL (0x48) +#define LPBFIFO_REG_FIFO_ALARM (0x4C) + +struct mpc52xx_lpbfifo { + struct device *dev; + phys_addr_t regs_phys; + void __iomem *regs; + int irq; + spinlock_t lock; + + struct bcom_task *bcom_tx_task; + struct bcom_task *bcom_rx_task; + struct bcom_task *bcom_cur_task; + + /* Current state data */ + struct mpc52xx_lpbfifo_request *req; + int dma_irqs_enabled; +}; + +/* The MPC5200 has only one fifo, so only need one instance structure */ +static struct mpc52xx_lpbfifo lpbfifo; + +/** + * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered + */ +static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) +{ + size_t transfer_size = req->size - req->pos; + struct bcom_bd *bd; + void __iomem *reg; + u32 *data; + int i; + int bit_fields; + int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); + int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; + int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; + + /* Set and clear the reset bits; is good practice in User Manual */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); + + /* set master enable bit */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001); + if (!dma) { + /* While the FIFO can be setup for transfer sizes as large as + * 16M-1, the FIFO itself is only 512 bytes deep and it does + * not generate interrupts for FIFO full events (only transfer + * complete will raise an IRQ). 
Therefore when not using + * Bestcomm to drive the FIFO it needs to either be polled, or + * transfers need to constrained to the size of the fifo. + * + * This driver restricts the size of the transfer + */ + if (transfer_size > 512) + transfer_size = 512; + + /* Load the FIFO with data */ + if (write) { + reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; + data = req->data + req->pos; + for (i = 0; i < transfer_size; i += 4) + out_be32(reg, *data++); + } + + /* Unmask both error and completion irqs */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301); + } else { + /* Choose the correct direction + * + * Configure the watermarks so DMA will always complete correctly. + * It may be worth experimenting with the ALARM value to see if + * there is a performance impacit. However, if it is wrong there + * is a risk of DMA not transferring the last chunk of data + */ + if (write) { + out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4); + out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7); + lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task; + } else { + out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff); + out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0); + lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task; + + if (poll_dma) { + if (lpbfifo.dma_irqs_enabled) { + disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); + lpbfifo.dma_irqs_enabled = 0; + } + } else { + if (!lpbfifo.dma_irqs_enabled) { + enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task)); + lpbfifo.dma_irqs_enabled = 1; + } + } + } + + bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task); + bd->status = transfer_size; + if (!write) { + /* + * In the DMA read case, the DMA doesn't complete, + * possibly due to incorrect watermarks in the ALARM + * and CONTROL regs. For now instead of trying to + * determine the right watermarks that will make this + * work, just increase the number of bytes the FIFO is + * expecting. + * + * When submitting another operation, the FIFO will get + * reset, so the condition of the FIFO waiting for a + * non-existent 4 bytes will get cleared. + */ + transfer_size += 4; /* BLECH! */ + } + bd->data[0] = req->data_phys + req->pos; + bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL); + + /* error irq & master enabled bit */ + bit_fields = 0x00000201; + + /* Unmask irqs */ + if (write && (!poll_dma)) + bit_fields |= 0x00000100; /* completion irq too */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields); + } + + /* Set transfer size, width, chip select and READ mode */ + out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS, + req->offset + req->pos); + out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size); + + bit_fields = req->cs << 24 | 0x000008; + if (!write) + bit_fields |= 0x010000; /* read mode */ + out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields); + + /* Kick it off */ + out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); + if (dma) + bcom_enable(lpbfifo.bcom_cur_task); +} + +/** + * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO + * + * On transmit, the dma completion irq triggers before the fifo completion + * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm + * task completion irq becuase everyting is not really done until the LPB FIFO + * completion irq triggers. + * + * In other words: + * For DMA, on receive, the "Fat Lady" is the bestcom completion irq. on + * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this + * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings. 
+ * + * Reasons for entering this routine: + * 1) PIO mode rx and tx completion irq + * 2) DMA interrupt mode tx completion irq + * 3) DMA polled mode tx + * + * Exit conditions: + * 1) Transfer aborted + * 2) FIFO complete without DMA; more data to do + * 3) FIFO complete without DMA; all data transfered + * 4) FIFO complete using DMA + * + * Condition 1 can occur regardless of whether or not DMA is used. + * It requires executing the callback to report the error and exiting + * immediately. + * + * Condition 2 requires programming the FIFO with the next block of data + * + * Condition 3 requires executing the callback to report completion + * + * Condition 4 means the same as 3, except that we also retrieve the bcom + * buffer so DMA doesn't get clogged up. + * + * To make things trickier, the spinlock must be dropped before + * executing the callback, otherwise we could end up with a deadlock + * or nested spinlock condition. The out path is non-trivial, so + * extra fiddling is done to make sure all paths lead to the same + * outbound code. + */ +static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id) +{ + struct mpc52xx_lpbfifo_request *req; + u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); + void __iomem *reg; + u32 *data; + int count, i; + int do_callback = 0; + u32 ts; + unsigned long flags; + int dma, write, poll_dma; + + spin_lock_irqsave(&lpbfifo.lock, flags); + ts = get_tbl(); + + req = lpbfifo.req; + if (!req) { + spin_unlock_irqrestore(&lpbfifo.lock, flags); + pr_err("bogus LPBFIFO IRQ\n"); + return IRQ_HANDLED; + } + + dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); + write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; + poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; + + if (dma && !write) { + spin_unlock_irqrestore(&lpbfifo.lock, flags); + pr_err("bogus LPBFIFO IRQ (dma and not writting)\n"); + return IRQ_HANDLED; + } + + if ((status & 0x01) == 0) { + goto out; + } + + /* check abort bit */ + if (status & 0x10) { + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); + do_callback = 1; + goto out; + } + + /* Read result from hardware */ + count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS); + count &= 0x00ffffff; + + if (!dma && !write) { + /* copy the data out of the FIFO */ + reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA; + data = req->data + req->pos; + for (i = 0; i < count; i += 4) + *data++ = in_be32(reg); + } + + /* Update transfer position and count */ + req->pos += count; + + /* Decide what to do next */ + if (req->size - req->pos) + mpc52xx_lpbfifo_kick(req); /* more work to do */ + else + do_callback = 1; + + out: + /* Clear the IRQ */ + out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01); + + if (dma && (status & 0x11)) { + /* + * Count the DMA as complete only when the FIFO completion + * status or abort bits are set. + * + * (status & 0x01) should always be the case except sometimes + * when using polled DMA. + * + * (status & 0x10) {transfer aborted}: This case needs more + * testing. 
+ */ + bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); + } + req->last_byte = ((u8 *)req->data)[req->size - 1]; + + /* When the do_callback flag is set; it means the transfer is finished + * so set the FIFO as idle */ + if (do_callback) + lpbfifo.req = NULL; + + if (irq != 0) /* don't increment on polled case */ + req->irq_count++; + + req->irq_ticks += get_tbl() - ts; + spin_unlock_irqrestore(&lpbfifo.lock, flags); + + /* Spinlock is released; it is now safe to call the callback */ + if (do_callback && req->callback) + req->callback(req); + + return IRQ_HANDLED; +} + +/** + * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task + * + * Only used when receiving data. + */ +static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id) +{ + struct mpc52xx_lpbfifo_request *req; + unsigned long flags; + u32 status; + u32 ts; + + spin_lock_irqsave(&lpbfifo.lock, flags); + ts = get_tbl(); + + req = lpbfifo.req; + if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { + spin_unlock_irqrestore(&lpbfifo.lock, flags); + return IRQ_HANDLED; + } + + if (irq != 0) /* don't increment on polled case */ + req->irq_count++; + + if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) { + spin_unlock_irqrestore(&lpbfifo.lock, flags); + + req->buffer_not_done_cnt++; + if ((req->buffer_not_done_cnt % 1000) == 0) + pr_err("transfer stalled\n"); + + return IRQ_HANDLED; + } + + bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL); + + req->last_byte = ((u8 *)req->data)[req->size - 1]; + + req->pos = status & 0x00ffffff; + + /* Mark the FIFO as idle */ + lpbfifo.req = NULL; + + /* Release the lock before calling out to the callback. */ + req->irq_ticks += get_tbl() - ts; + spin_unlock_irqrestore(&lpbfifo.lock, flags); + + if (req->callback) + req->callback(req); + + return IRQ_HANDLED; +} + +/** + * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion + */ +void mpc52xx_lpbfifo_poll(void) +{ + struct mpc52xx_lpbfifo_request *req = lpbfifo.req; + int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); + int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; + + /* + * For more information, see comments on the "Fat Lady" + */ + if (dma && write) + mpc52xx_lpbfifo_irq(0, NULL); + else + mpc52xx_lpbfifo_bcom_irq(0, NULL); +} +EXPORT_SYMBOL(mpc52xx_lpbfifo_poll); + +/** + * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request. 
+ * @req: Pointer to request structure + */ +int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) +{ + unsigned long flags; + + if (!lpbfifo.regs) + return -ENODEV; + + spin_lock_irqsave(&lpbfifo.lock, flags); + + /* If the req pointer is already set, then a transfer is in progress */ + if (lpbfifo.req) { + spin_unlock_irqrestore(&lpbfifo.lock, flags); + return -EBUSY; + } + + /* Setup the transfer */ + lpbfifo.req = req; + req->irq_count = 0; + req->irq_ticks = 0; + req->buffer_not_done_cnt = 0; + req->pos = 0; + + mpc52xx_lpbfifo_kick(req); + spin_unlock_irqrestore(&lpbfifo.lock, flags); + return 0; +} +EXPORT_SYMBOL(mpc52xx_lpbfifo_submit); + +void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&lpbfifo.lock, flags); + if (lpbfifo.req == req) { + /* Put it into reset and clear the state */ + bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task); + bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task); + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); + lpbfifo.req = NULL; + } + spin_unlock_irqrestore(&lpbfifo.lock, flags); +} +EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); + +static int __devinit +mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match) +{ + struct resource res; + int rc = -ENOMEM; + + if (lpbfifo.dev != NULL) + return -ENOSPC; + + lpbfifo.irq = irq_of_parse_and_map(op->node, 0); + if (!lpbfifo.irq) + return -ENODEV; + + if (of_address_to_resource(op->node, 0, &res)) + return -ENODEV; + lpbfifo.regs_phys = res.start; + lpbfifo.regs = of_iomap(op->node, 0); + if (!lpbfifo.regs) + return -ENOMEM; + + spin_lock_init(&lpbfifo.lock); + + /* Put FIFO into reset */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); + + /* Register the interrupt handler */ + rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0, + "mpc52xx-lpbfifo", &lpbfifo); + if (rc) + goto err_irq; + + /* Request the Bestcomm receive (fifo --> memory) task and IRQ */ + lpbfifo.bcom_rx_task = + bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, + BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC, + 16*1024*1024); + if (!lpbfifo.bcom_rx_task) + goto err_bcom_rx; + + rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), + mpc52xx_lpbfifo_bcom_irq, 0, + "mpc52xx-lpbfifo-rx", &lpbfifo); + if (rc) + goto err_bcom_rx_irq; + + /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ + lpbfifo.bcom_tx_task = + bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, + BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC); + if (!lpbfifo.bcom_tx_task) + goto err_bcom_tx; + + lpbfifo.dev = &op->dev; + return 0; + + err_bcom_tx: + free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); + err_bcom_rx_irq: + bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); + err_bcom_rx: + err_irq: + iounmap(lpbfifo.regs); + lpbfifo.regs = NULL; + + dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n"); + return -ENODEV; +} + + +static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op) +{ + if (lpbfifo.dev != &op->dev) + return 0; + + /* Put FIFO in reset */ + out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); + + /* Release the bestcomm transmit task */ + free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo); + bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task); + + /* Release the bestcomm receive task */ + free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); + bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); + + free_irq(lpbfifo.irq, &lpbfifo); + iounmap(lpbfifo.regs); + lpbfifo.regs = NULL; + lpbfifo.dev = NULL; + + return 0; +} + +static struct 
of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { + { .compatible = "fsl,mpc5200-lpbfifo", }, + {}, +}; + +static struct of_platform_driver mpc52xx_lpbfifo_driver = { + .owner = THIS_MODULE, + .name = "mpc52xx-lpbfifo", + .match_table = mpc52xx_lpbfifo_match, + .probe = mpc52xx_lpbfifo_probe, + .remove = __devexit_p(mpc52xx_lpbfifo_remove), +}; + +/*********************************************************************** + * Module init/exit + */ +static int __init mpc52xx_lpbfifo_init(void) +{ + pr_debug("Registering LocalPlus bus FIFO driver\n"); + return of_register_platform_driver(&mpc52xx_lpbfifo_driver); +} +module_init(mpc52xx_lpbfifo_init); + +static void __exit mpc52xx_lpbfifo_exit(void) +{ + pr_debug("Unregistering LocalPlus bus FIFO driver\n"); + of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); +} +module_exit(mpc52xx_lpbfifo_exit); diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 4b1c422b8145..0ff5174ae4f5 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -8,7 +8,7 @@ endif obj-y := lpar.o hvCall.o nvram.o reconfig.o \ setup.o iommu.o ras.o \ - firmware.o power.o + firmware.o power.o dlpar.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_SCANLOG) += scanlog.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c new file mode 100644 index 000000000000..fd2f0afeb4de --- /dev/null +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -0,0 +1,558 @@ +/* + * Support for dynamic reconfiguration for PCI, Memory, and CPU + * Hotplug and Dynamic Logical Partitioning on RPA platforms. + * + * Copyright (C) 2009 Nathan Fontenot + * Copyright (C) 2009 IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/notifier.h> +#include <linux/proc_fs.h> +#include <linux/spinlock.h> +#include <linux/cpu.h> +#include "offline_states.h" + +#include <asm/prom.h> +#include <asm/machdep.h> +#include <asm/uaccess.h> +#include <asm/rtas.h> +#include <asm/pSeries_reconfig.h> + +struct cc_workarea { + u32 drc_index; + u32 zero; + u32 name_offset; + u32 prop_length; + u32 prop_offset; +}; + +static void dlpar_free_cc_property(struct property *prop) +{ + kfree(prop->name); + kfree(prop->value); + kfree(prop); +} + +static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) +{ + struct property *prop; + char *name; + char *value; + + prop = kzalloc(sizeof(*prop), GFP_KERNEL); + if (!prop) + return NULL; + + name = (char *)ccwa + ccwa->name_offset; + prop->name = kstrdup(name, GFP_KERNEL); + + prop->length = ccwa->prop_length; + value = (char *)ccwa + ccwa->prop_offset; + prop->value = kzalloc(prop->length, GFP_KERNEL); + if (!prop->value) { + dlpar_free_cc_property(prop); + return NULL; + } + + memcpy(prop->value, value, prop->length); + return prop; +} + +static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) +{ + struct device_node *dn; + char *name; + + dn = kzalloc(sizeof(*dn), GFP_KERNEL); + if (!dn) + return NULL; + + /* The configure connector reported name does not contain a + * preceeding '/', so we allocate a buffer large enough to + * prepend this to the full_name. 
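+	 *
+	 * For example (purely illustrative node name): a connector reported
+	 * as "PowerPC,POWER7@10" would end up here with a full_name of
+	 * "/PowerPC,POWER7@10".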
+ */ + name = (char *)ccwa + ccwa->name_offset; + dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL); + if (!dn->full_name) { + kfree(dn); + return NULL; + } + + sprintf(dn->full_name, "/%s", name); + return dn; +} + +static void dlpar_free_one_cc_node(struct device_node *dn) +{ + struct property *prop; + + while (dn->properties) { + prop = dn->properties; + dn->properties = prop->next; + dlpar_free_cc_property(prop); + } + + kfree(dn->full_name); + kfree(dn); +} + +static void dlpar_free_cc_nodes(struct device_node *dn) +{ + if (dn->child) + dlpar_free_cc_nodes(dn->child); + + if (dn->sibling) + dlpar_free_cc_nodes(dn->sibling); + + dlpar_free_one_cc_node(dn); +} + +#define NEXT_SIBLING 1 +#define NEXT_CHILD 2 +#define NEXT_PROPERTY 3 +#define PREV_PARENT 4 +#define MORE_MEMORY 5 +#define CALL_AGAIN -2 +#define ERR_CFG_USE -9003 + +struct device_node *dlpar_configure_connector(u32 drc_index) +{ + struct device_node *dn; + struct device_node *first_dn = NULL; + struct device_node *last_dn = NULL; + struct property *property; + struct property *last_property = NULL; + struct cc_workarea *ccwa; + int cc_token; + int rc; + + cc_token = rtas_token("ibm,configure-connector"); + if (cc_token == RTAS_UNKNOWN_SERVICE) + return NULL; + + spin_lock(&rtas_data_buf_lock); + ccwa = (struct cc_workarea *)&rtas_data_buf[0]; + ccwa->drc_index = drc_index; + ccwa->zero = 0; + + rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); + while (rc) { + switch (rc) { + case NEXT_SIBLING: + dn = dlpar_parse_cc_node(ccwa); + if (!dn) + goto cc_error; + + dn->parent = last_dn->parent; + last_dn->sibling = dn; + last_dn = dn; + break; + + case NEXT_CHILD: + dn = dlpar_parse_cc_node(ccwa); + if (!dn) + goto cc_error; + + if (!first_dn) + first_dn = dn; + else { + dn->parent = last_dn; + if (last_dn) + last_dn->child = dn; + } + + last_dn = dn; + break; + + case NEXT_PROPERTY: + property = dlpar_parse_cc_property(ccwa); + if (!property) + goto cc_error; + + if (!last_dn->properties) + last_dn->properties = property; + else + last_property->next = property; + + last_property = property; + break; + + case PREV_PARENT: + last_dn = last_dn->parent; + break; + + case CALL_AGAIN: + break; + + case MORE_MEMORY: + case ERR_CFG_USE: + default: + printk(KERN_ERR "Unexpected Error (%d) " + "returned from configure-connector\n", rc); + goto cc_error; + } + + rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); + } + + spin_unlock(&rtas_data_buf_lock); + return first_dn; + +cc_error: + if (first_dn) + dlpar_free_cc_nodes(first_dn); + spin_unlock(&rtas_data_buf_lock); + return NULL; +} + +static struct device_node *derive_parent(const char *path) +{ + struct device_node *parent; + char *last_slash; + + last_slash = strrchr(path, '/'); + if (last_slash == path) { + parent = of_find_node_by_path("/"); + } else { + char *parent_path; + int parent_path_len = last_slash - path + 1; + parent_path = kmalloc(parent_path_len, GFP_KERNEL); + if (!parent_path) + return NULL; + + strlcpy(parent_path, path, parent_path_len); + parent = of_find_node_by_path(parent_path); + kfree(parent_path); + } + + return parent; +} + +int dlpar_attach_node(struct device_node *dn) +{ + struct proc_dir_entry *ent; + int rc; + + of_node_set_flag(dn, OF_DYNAMIC); + kref_init(&dn->kref); + dn->parent = derive_parent(dn->full_name); + if (!dn->parent) + return -ENOMEM; + + rc = blocking_notifier_call_chain(&pSeries_reconfig_chain, + PSERIES_RECONFIG_ADD, dn); + if (rc == NOTIFY_BAD) { + printk(KERN_ERR "Failed to add device node %s\n", + 
dn->full_name); + return -ENOMEM; /* For now, safe to assume kmalloc failure */ + } + + of_attach_node(dn); + +#ifdef CONFIG_PROC_DEVICETREE + ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde); + if (ent) + proc_device_tree_add_node(dn, ent); +#endif + + of_node_put(dn->parent); + return 0; +} + +int dlpar_detach_node(struct device_node *dn) +{ + struct device_node *parent = dn->parent; + struct property *prop = dn->properties; + +#ifdef CONFIG_PROC_DEVICETREE + while (prop) { + remove_proc_entry(prop->name, dn->pde); + prop = prop->next; + } + + if (dn->pde) + remove_proc_entry(dn->pde->name, parent->pde); +#endif + + blocking_notifier_call_chain(&pSeries_reconfig_chain, + PSERIES_RECONFIG_REMOVE, dn); + of_detach_node(dn); + of_node_put(dn); /* Must decrement the refcount */ + + return 0; +} + +int online_node_cpus(struct device_node *dn) +{ + int rc = 0; + unsigned int cpu; + int len, nthreads, i; + const u32 *intserv; + + intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); + if (!intserv) + return -EINVAL; + + nthreads = len / sizeof(u32); + + cpu_maps_update_begin(); + for (i = 0; i < nthreads; i++) { + for_each_present_cpu(cpu) { + if (get_hard_smp_processor_id(cpu) != intserv[i]) + continue; + BUG_ON(get_cpu_current_state(cpu) + != CPU_STATE_OFFLINE); + cpu_maps_update_done(); + rc = cpu_up(cpu); + if (rc) + goto out; + cpu_maps_update_begin(); + + break; + } + if (cpu == num_possible_cpus()) + printk(KERN_WARNING "Could not find cpu to online " + "with physical id 0x%x\n", intserv[i]); + } + cpu_maps_update_done(); + +out: + return rc; + +} + +int offline_node_cpus(struct device_node *dn) +{ + int rc = 0; + unsigned int cpu; + int len, nthreads, i; + const u32 *intserv; + + intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); + if (!intserv) + return -EINVAL; + + nthreads = len / sizeof(u32); + + cpu_maps_update_begin(); + for (i = 0; i < nthreads; i++) { + for_each_present_cpu(cpu) { + if (get_hard_smp_processor_id(cpu) != intserv[i]) + continue; + + if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) + break; + + if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { + cpu_maps_update_done(); + rc = cpu_down(cpu); + if (rc) + goto out; + cpu_maps_update_begin(); + break; + + } + + /* + * The cpu is in CPU_STATE_INACTIVE. + * Upgrade it's state to CPU_STATE_OFFLINE. 
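+	 * (An "inactive" thread is presumably parked in the hypervisor via
+	 * H_CEDE; record the new preferred state, then H_PROD it so it wakes
+	 * up, notices the change and can be reaped by __cpu_die() below.)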
+ */ + set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); + BUG_ON(plpar_hcall_norets(H_PROD, intserv[i]) + != H_SUCCESS); + __cpu_die(cpu); + break; + } + if (cpu == num_possible_cpus()) + printk(KERN_WARNING "Could not find cpu to offline " + "with physical id 0x%x\n", intserv[i]); + } + cpu_maps_update_done(); + +out: + return rc; + +} + +#define DR_ENTITY_SENSE 9003 +#define DR_ENTITY_PRESENT 1 +#define DR_ENTITY_UNUSABLE 2 +#define ALLOCATION_STATE 9003 +#define ALLOC_UNUSABLE 0 +#define ALLOC_USABLE 1 +#define ISOLATION_STATE 9001 +#define ISOLATE 0 +#define UNISOLATE 1 + +int dlpar_acquire_drc(u32 drc_index) +{ + int dr_status, rc; + + rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, + DR_ENTITY_SENSE, drc_index); + if (rc || dr_status != DR_ENTITY_UNUSABLE) + return -1; + + rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE); + if (rc) + return rc; + + rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); + if (rc) { + rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); + return rc; + } + + return 0; +} + +int dlpar_release_drc(u32 drc_index) +{ + int dr_status, rc; + + rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status, + DR_ENTITY_SENSE, drc_index); + if (rc || dr_status != DR_ENTITY_PRESENT) + return -1; + + rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE); + if (rc) + return rc; + + rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE); + if (rc) { + rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE); + return rc; + } + + return 0; +} + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + +static DEFINE_MUTEX(pseries_cpu_hotplug_mutex); + +void cpu_hotplug_driver_lock() +{ + mutex_lock(&pseries_cpu_hotplug_mutex); +} + +void cpu_hotplug_driver_unlock() +{ + mutex_unlock(&pseries_cpu_hotplug_mutex); +} + +static ssize_t dlpar_cpu_probe(const char *buf, size_t count) +{ + struct device_node *dn; + unsigned long drc_index; + char *cpu_name; + int rc; + + cpu_hotplug_driver_lock(); + rc = strict_strtoul(buf, 0, &drc_index); + if (rc) { + rc = -EINVAL; + goto out; + } + + dn = dlpar_configure_connector(drc_index); + if (!dn) { + rc = -EINVAL; + goto out; + } + + /* configure-connector reports cpus as living in the base + * directory of the device tree. CPUs actually live in the + * cpus directory so we need to fixup the full_name. + */ + cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1, + GFP_KERNEL); + if (!cpu_name) { + dlpar_free_cc_nodes(dn); + rc = -ENOMEM; + goto out; + } + + sprintf(cpu_name, "/cpus%s", dn->full_name); + kfree(dn->full_name); + dn->full_name = cpu_name; + + rc = dlpar_acquire_drc(drc_index); + if (rc) { + dlpar_free_cc_nodes(dn); + rc = -EINVAL; + goto out; + } + + rc = dlpar_attach_node(dn); + if (rc) { + dlpar_release_drc(drc_index); + dlpar_free_cc_nodes(dn); + } + + rc = online_node_cpus(dn); +out: + cpu_hotplug_driver_unlock(); + + return rc ? 
rc : count; +} + +static ssize_t dlpar_cpu_release(const char *buf, size_t count) +{ + struct device_node *dn; + const u32 *drc_index; + int rc; + + dn = of_find_node_by_path(buf); + if (!dn) + return -EINVAL; + + drc_index = of_get_property(dn, "ibm,my-drc-index", NULL); + if (!drc_index) { + of_node_put(dn); + return -EINVAL; + } + + cpu_hotplug_driver_lock(); + rc = offline_node_cpus(dn); + if (rc) { + of_node_put(dn); + rc = -EINVAL; + goto out; + } + + rc = dlpar_release_drc(*drc_index); + if (rc) { + of_node_put(dn); + goto out; + } + + rc = dlpar_detach_node(dn); + if (rc) { + dlpar_acquire_drc(*drc_index); + goto out; + } + + of_node_put(dn); +out: + cpu_hotplug_driver_unlock(); + return rc ? rc : count; +} + +static int __init pseries_dlpar_init(void) +{ + ppc_md.cpu_probe = dlpar_cpu_probe; + ppc_md.cpu_release = dlpar_cpu_release; + + return 0; +} +machine_device_initcall(pseries, pseries_dlpar_init); + +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 5182d2b992c6..a2305d29bbbd 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -96,7 +96,7 @@ static struct device_node *derive_parent(const char *path) return parent; } -static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); +BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); int pSeries_reconfig_notifier_register(struct notifier_block *nb) { diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index f2df6e2a224c..51eea3000b55 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -781,5 +781,15 @@ config PATA_BF54X If unsure, say N. +config PATA_MACIO + tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" + depends on PPC_PMAC + help + Most IDE capable PowerMacs have IDE busses driven by a variant + of this controller which is part of the Apple chipset used on + most PowerMac models. Some models have multiple busses using + different chipsets, though generally, MacIO is one of them. 
+ + endif # ATA_SFF endif # ATA diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 01e126f343b3..e439141d423e 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o obj-$(CONFIG_PDC_ADMA) += pdc_adma.o obj-$(CONFIG_SATA_FSL) += sata_fsl.o +obj-$(CONFIG_PATA_MACIO) += pata_macio.o obj-$(CONFIG_PATA_ALI) += pata_ali.o obj-$(CONFIG_PATA_AMD) += pata_amd.o diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index bbbb1fab1755..51eb1e298601 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2384,7 +2384,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_IDLE; if (ap->ioaddr.bmdma_addr) - ata_bmdma_stop(qc); + ap->ops->bmdma_stop(qc); spin_unlock_irqrestore(ap->lock, flags); } diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c new file mode 100644 index 000000000000..4cc7bbd10ec2 --- /dev/null +++ b/drivers/ata/pata_macio.c @@ -0,0 +1,1427 @@ +/* + * Libata based driver for Apple "macio" family of PATA controllers + * + * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp + * <benh@kernel.crashing.org> + * + * Some bits and pieces from drivers/ide/ppc/pmac.c + * + */ + +#undef DEBUG +#undef DEBUG_DMA + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/adb.h> +#include <linux/pmu.h> +#include <linux/scatterlist.h> +#include <linux/of.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> + +#include <asm/macio.h> +#include <asm/io.h> +#include <asm/dbdma.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <asm/pmac_feature.h> +#include <asm/mediabay.h> + +#ifdef DEBUG_DMA +#define dev_dbgdma(dev, format, arg...) \ + dev_printk(KERN_DEBUG , dev , format , ## arg) +#else +#define dev_dbgdma(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) +#endif + +#define DRV_NAME "pata_macio" +#define DRV_VERSION "0.9" + +/* Models of macio ATA controller */ +enum { + controller_ohare, /* OHare based */ + controller_heathrow, /* Heathrow/Paddington */ + controller_kl_ata3, /* KeyLargo ATA-3 */ + controller_kl_ata4, /* KeyLargo ATA-4 */ + controller_un_ata6, /* UniNorth2 ATA-6 */ + controller_k2_ata6, /* K2 ATA-6 */ + controller_sh_ata6, /* Shasta ATA-6 */ +}; + +static const char* macio_ata_names[] = { + "OHare ATA", /* OHare based */ + "Heathrow ATA", /* Heathrow/Paddington */ + "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */ + "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */ + "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */ + "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */ + "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */ +}; + +/* + * Extra registers, both 32-bit little-endian + */ +#define IDE_TIMING_CONFIG 0x200 +#define IDE_INTERRUPT 0x300 + +/* Kauai (U2) ATA has different register setup */ +#define IDE_KAUAI_PIO_CONFIG 0x200 +#define IDE_KAUAI_ULTRA_CONFIG 0x210 +#define IDE_KAUAI_POLL_CONFIG 0x220 + +/* + * Timing configuration register definitions + */ + +/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */ +#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS) +#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS) +#define IDE_SYSCLK_NS 30 /* 33Mhz cell */ +#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */ + +/* 133Mhz cell, found in shasta. 
+ * See comments about 100 Mhz Uninorth 2...
+ * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
+ * weird and I don't know why... at this stage
+ */
+#define TR_133_PIOREG_PIO_MASK 0xff000fff
+#define TR_133_PIOREG_MDMA_MASK 0x00fff800
+#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
+#define TR_133_UDMAREG_UDMA_EN 0x00000001
+
+/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
+ * (106b/0033) on uninorth or K2 internal PCI bus and its clock is
+ * controlled like gem or fw. It appears to be an evolution of keylargo
+ * ATA4 with a timing register extended to two 32-bit registers (one
+ * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
+ * It has its own local feature control register as well.
+ *
+ * After scratching my mind over the timing values, at least for PIO
+ * and MDMA, I think I've figured out the format of the timing register,
+ * though I use pre-calculated tables for UDMA as usual...
+ */
+#define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */
+#define TR_100_PIO_ADDRSETUP_SHIFT 24
+#define TR_100_MDMA_MASK 0x00fff000
+#define TR_100_MDMA_RECOVERY_MASK 0x00fc0000
+#define TR_100_MDMA_RECOVERY_SHIFT 18
+#define TR_100_MDMA_ACCESS_MASK 0x0003f000
+#define TR_100_MDMA_ACCESS_SHIFT 12
+#define TR_100_PIO_MASK 0xff000fff
+#define TR_100_PIO_RECOVERY_MASK 0x00000fc0
+#define TR_100_PIO_RECOVERY_SHIFT 6
+#define TR_100_PIO_ACCESS_MASK 0x0000003f
+#define TR_100_PIO_ACCESS_SHIFT 0
+
+#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
+#define TR_100_UDMAREG_UDMA_EN 0x00000001
+
+
+/* 66Mhz cell, found in KeyLargo. Can do ultra modes 0 to 2 on
+ * a 40 connector cable and up to 4 on an 80 connector one.
+ * Clock unit is 15ns (66Mhz)
+ *
+ * 3 values can be programmed:
+ * - Write data setup, which appears to match the cycle time. They
+ * also call it DIOW setup.
+ * - Ready to pause time (from spec)
+ * - Address setup. That one is weird. I don't see where exactly
+ * it fits in UDMA cycles; I got its name from an obscure piece
+ * of commented out code in Darwin. They leave it at 0, we do as
+ * well, despite a comment that would suggest it has a
+ * min value of 45ns.
+ * Apple also adds 60ns to the write data setup (or cycle time?) on
+ * reads.
+ */
+#define TR_66_UDMA_MASK 0xfff00000
+#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
+#define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */
+#define TR_66_PIO_ADDRSETUP_SHIFT 29
+#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
+#define TR_66_UDMA_RDY2PAUS_SHIFT 25
+#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
+#define TR_66_UDMA_WRDATASETUP_SHIFT 21
+#define TR_66_MDMA_MASK 0x000ffc00
+#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
+#define TR_66_MDMA_RECOVERY_SHIFT 15
+#define TR_66_MDMA_ACCESS_MASK 0x00007c00
+#define TR_66_MDMA_ACCESS_SHIFT 10
+#define TR_66_PIO_MASK 0xe00003ff
+#define TR_66_PIO_RECOVERY_MASK 0x000003e0
+#define TR_66_PIO_RECOVERY_SHIFT 5
+#define TR_66_PIO_ACCESS_MASK 0x0000001f
+#define TR_66_PIO_ACCESS_SHIFT 0
+
+/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
+ * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
+ *
+ * The access time and recovery time can be programmed. Some older
+ * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
+ * the same here for safety against broken old hardware ;)
+ * The HalfTick bit, when set, adds half a clock (15ns) to the access
+ * time and removes one from recovery.
It's not supported on KeyLargo + * implementation afaik. The E bit appears to be set for PIO mode 0 and + * is used to reach long timings used in this mode. + */ +#define TR_33_MDMA_MASK 0x003ff800 +#define TR_33_MDMA_RECOVERY_MASK 0x001f0000 +#define TR_33_MDMA_RECOVERY_SHIFT 16 +#define TR_33_MDMA_ACCESS_MASK 0x0000f800 +#define TR_33_MDMA_ACCESS_SHIFT 11 +#define TR_33_MDMA_HALFTICK 0x00200000 +#define TR_33_PIO_MASK 0x000007ff +#define TR_33_PIO_E 0x00000400 +#define TR_33_PIO_RECOVERY_MASK 0x000003e0 +#define TR_33_PIO_RECOVERY_SHIFT 5 +#define TR_33_PIO_ACCESS_MASK 0x0000001f +#define TR_33_PIO_ACCESS_SHIFT 0 + +/* + * Interrupt register definitions. Only present on newer cells + * (Keylargo and later afaik) so we don't use it. + */ +#define IDE_INTR_DMA 0x80000000 +#define IDE_INTR_DEVICE 0x40000000 + +/* + * FCR Register on Kauai. Not sure what bit 0x4 is ... + */ +#define KAUAI_FCR_UATA_MAGIC 0x00000004 +#define KAUAI_FCR_UATA_RESET_N 0x00000002 +#define KAUAI_FCR_UATA_ENABLE 0x00000001 + + +/* Allow up to 256 DBDMA commands per xfer */ +#define MAX_DCMDS 256 + +/* Don't let a DMA segment go all the way to 64K */ +#define MAX_DBDMA_SEG 0xff00 + + +/* + * Wait 1s for disk to answer on IDE bus after a hard reset + * of the device (via GPIO/FCR). + * + * Some devices seem to "pollute" the bus even after dropping + * the BSY bit (typically some combo drives slave on the UDMA + * bus) after a hard reset. Since we hard reset all drives on + * KeyLargo ATA66, we have to keep that delay around. I may end + * up not hard resetting anymore on these and keep the delay only + * for older interfaces instead (we have to reset when coming + * from MacOS...) --BenH. + */ +#define IDE_WAKEUP_DELAY_MS 1000 + +struct pata_macio_timing; + +struct pata_macio_priv { + int kind; + int aapl_bus_id; + int mediabay : 1; + struct device_node *node; + struct macio_dev *mdev; + struct pci_dev *pdev; + struct device *dev; + int irq; + u32 treg[2][2]; + void __iomem *tfregs; + void __iomem *kauai_fcr; + struct dbdma_cmd * dma_table_cpu; + dma_addr_t dma_table_dma; + struct ata_host *host; + const struct pata_macio_timing *timings; +}; + +/* Previous variants of this driver used to calculate timings + * for various variants of the chip and use tables for others. + * + * Not only was this confusing, but in addition, it isn't clear + * whether our calculation code was correct. It didn't entirely + * match the darwin code and whatever documentation I could find + * on these cells + * + * I decided to entirely rely on a table instead for this version + * of the driver. Also, because I don't really care about derated + * modes and really old HW other than making it work, I'm not going + * to calculate / snoop timing values for something else than the + * standard modes. 
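+ *
+ * Each table entry below simply carries the raw bits to OR into the
+ * first (and, on Kauai-style cells, second) timing register for one
+ * transfer mode; pata_macio_set_timings() looks the mode up with
+ * pata_macio_find_timing() and merges reg1/reg2 into the cached
+ * treg[] values before they are written to the hardware.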
+ */ +struct pata_macio_timing { + int mode; + u32 reg1; /* Bits to set in first timing reg */ + u32 reg2; /* Bits to set in second timing reg */ +}; + +static const struct pata_macio_timing pata_macio_ohare_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00074000, 0, }, + { XFER_MW_DMA_1, 0x00221000, 0, }, + { XFER_MW_DMA_2, 0x00211000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_heathrow_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00074000, 0, }, + { XFER_MW_DMA_1, 0x00221000, 0, }, + { XFER_MW_DMA_2, 0x00211000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kl33_timings[] = { + { XFER_PIO_0, 0x00000526, 0, }, + { XFER_PIO_1, 0x00000085, 0, }, + { XFER_PIO_2, 0x00000025, 0, }, + { XFER_PIO_3, 0x00000025, 0, }, + { XFER_PIO_4, 0x00000025, 0, }, + { XFER_MW_DMA_0, 0x00084000, 0, }, + { XFER_MW_DMA_1, 0x00021800, 0, }, + { XFER_MW_DMA_2, 0x00011800, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kl66_timings[] = { + { XFER_PIO_0, 0x0000038c, 0, }, + { XFER_PIO_1, 0x0000020a, 0, }, + { XFER_PIO_2, 0x00000127, 0, }, + { XFER_PIO_3, 0x000000c6, 0, }, + { XFER_PIO_4, 0x00000065, 0, }, + { XFER_MW_DMA_0, 0x00084000, 0, }, + { XFER_MW_DMA_1, 0x00029800, 0, }, + { XFER_MW_DMA_2, 0x00019400, 0, }, + { XFER_UDMA_0, 0x19100000, 0, }, + { XFER_UDMA_1, 0x14d00000, 0, }, + { XFER_UDMA_2, 0x10900000, 0, }, + { XFER_UDMA_3, 0x0c700000, 0, }, + { XFER_UDMA_4, 0x0c500000, 0, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_kauai_timings[] = { + { XFER_PIO_0, 0x08000a92, 0, }, + { XFER_PIO_1, 0x0800060f, 0, }, + { XFER_PIO_2, 0x0800038b, 0, }, + { XFER_PIO_3, 0x05000249, 0, }, + { XFER_PIO_4, 0x04000148, 0, }, + { XFER_MW_DMA_0, 0x00618000, 0, }, + { XFER_MW_DMA_1, 0x00209000, 0, }, + { XFER_MW_DMA_2, 0x00148000, 0, }, + { XFER_UDMA_0, 0, 0x000070c1, }, + { XFER_UDMA_1, 0, 0x00005d81, }, + { XFER_UDMA_2, 0, 0x00004a61, }, + { XFER_UDMA_3, 0, 0x00003a51, }, + { XFER_UDMA_4, 0, 0x00002a31, }, + { XFER_UDMA_5, 0, 0x00002921, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing pata_macio_shasta_timings[] = { + { XFER_PIO_0, 0x0a000c97, 0, }, + { XFER_PIO_1, 0x07000712, 0, }, + { XFER_PIO_2, 0x040003cd, 0, }, + { XFER_PIO_3, 0x0500028b, 0, }, + { XFER_PIO_4, 0x0400010a, 0, }, + { XFER_MW_DMA_0, 0x00820800, 0, }, + { XFER_MW_DMA_1, 0x0028b000, 0, }, + { XFER_MW_DMA_2, 0x001ca000, 0, }, + { XFER_UDMA_0, 0, 0x00035901, }, + { XFER_UDMA_1, 0, 0x000348b1, }, + { XFER_UDMA_2, 0, 0x00033881, }, + { XFER_UDMA_3, 0, 0x00033861, }, + { XFER_UDMA_4, 0, 0x00033841, }, + { XFER_UDMA_5, 0, 0x00033031, }, + { XFER_UDMA_6, 0, 0x00033021, }, + { -1, 0, 0 } +}; + +static const struct pata_macio_timing *pata_macio_find_timing( + struct pata_macio_priv *priv, + int mode) +{ + int i; + + for (i = 0; priv->timings[i].mode > 0; i++) { + if (priv->timings[i].mode == mode) + return &priv->timings[i]; + } + return NULL; +} + + +static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device) +{ + struct pata_macio_priv *priv = ap->private_data; + void __iomem *rbase = ap->ioaddr.cmd_addr; + + if (priv->kind == controller_sh_ata6 || + priv->kind == controller_un_ata6 || + priv->kind == 
controller_k2_ata6) { + writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG); + writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG); + } else + writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG); +} + +static void pata_macio_dev_select(struct ata_port *ap, unsigned int device) +{ + ata_sff_dev_select(ap, device); + + /* Apply timings */ + pata_macio_apply_timings(ap, device); +} + +static void pata_macio_set_timings(struct ata_port *ap, + struct ata_device *adev) +{ + struct pata_macio_priv *priv = ap->private_data; + const struct pata_macio_timing *t; + + dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n", + adev->devno, + adev->pio_mode, + ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)), + adev->dma_mode, + ata_mode_string(ata_xfer_mode2mask(adev->dma_mode))); + + /* First clear timings */ + priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0; + + /* Now get the PIO timings */ + t = pata_macio_find_timing(priv, adev->pio_mode); + if (t == NULL) { + dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n", + adev->pio_mode); + t = pata_macio_find_timing(priv, XFER_PIO_0); + } + BUG_ON(t == NULL); + + /* PIO timings only ever use the first treg */ + priv->treg[adev->devno][0] |= t->reg1; + + /* Now get DMA timings */ + t = pata_macio_find_timing(priv, adev->dma_mode); + if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) { + dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n"); + t = pata_macio_find_timing(priv, XFER_MW_DMA_0); + } + BUG_ON(t == NULL); + + /* DMA timings can use both tregs */ + priv->treg[adev->devno][0] |= t->reg1; + priv->treg[adev->devno][1] |= t->reg2; + + dev_dbg(priv->dev, " -> %08x %08x\n", + priv->treg[adev->devno][0], + priv->treg[adev->devno][1]); + + /* Apply to hardware */ + pata_macio_apply_timings(ap, adev->devno); +} + +/* + * Blast some well known "safe" values to the timing registers at init or + * wakeup from sleep time, before we do real calculation + */ +static void pata_macio_default_timings(struct pata_macio_priv *priv) +{ + unsigned int value, value2 = 0; + + switch(priv->kind) { + case controller_sh_ata6: + value = 0x0a820c97; + value2 = 0x00033031; + break; + case controller_un_ata6: + case controller_k2_ata6: + value = 0x08618a92; + value2 = 0x00002921; + break; + case controller_kl_ata4: + value = 0x0008438c; + break; + case controller_kl_ata3: + value = 0x00084526; + break; + case controller_heathrow: + case controller_ohare: + default: + value = 0x00074526; + break; + } + priv->treg[0][0] = priv->treg[1][0] = value; + priv->treg[0][1] = priv->treg[1][1] = value2; +} + +static int pata_macio_cable_detect(struct ata_port *ap) +{ + struct pata_macio_priv *priv = ap->private_data; + + /* Get cable type from device-tree */ + if (priv->kind == controller_kl_ata4 || + priv->kind == controller_un_ata6 || + priv->kind == controller_k2_ata6 || + priv->kind == controller_sh_ata6) { + const char* cable = of_get_property(priv->node, "cable-type", + NULL); + struct device_node *root = of_find_node_by_path("/"); + const char *model = of_get_property(root, "model", NULL); + + if (cable && !strncmp(cable, "80-", 3)) { + /* Some drives fail to detect 80c cable in PowerBook + * These machine use proprietary short IDE cable + * anyway + */ + if (!strncmp(model, "PowerBook", 9)) + return ATA_CBL_PATA40_SHORT; + else + return ATA_CBL_PATA80; + } + } + + /* G5's seem to have incorrect cable type in device-tree. 
+ * Let's assume they always have a 80 conductor cable, this seem to + * be always the case unless the user mucked around + */ + if (of_device_is_compatible(priv->node, "K2-UATA") || + of_device_is_compatible(priv->node, "shasta-ata")) + return ATA_CBL_PATA80; + + /* Anything else is 40 connectors */ + return ATA_CBL_PATA40; +} + +static void pata_macio_qc_prep(struct ata_queued_cmd *qc) +{ + unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); + struct ata_port *ap = qc->ap; + struct pata_macio_priv *priv = ap->private_data; + struct scatterlist *sg; + struct dbdma_cmd *table; + unsigned int si, pi; + + dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n", + __func__, qc, qc->flags, write, qc->dev->devno); + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + table = (struct dbdma_cmd *) priv->dma_table_cpu; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + /* table overflow should never happen */ + BUG_ON (pi++ >= MAX_DCMDS); + + len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG; + st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE); + st_le16(&table->req_count, len); + st_le32(&table->phy_addr, addr); + table->cmd_dep = 0; + table->xfer_status = 0; + table->res_count = 0; + addr += len; + sg_len -= len; + ++table; + } + } + + /* Should never happen according to Tejun */ + BUG_ON(!pi); + + /* Convert the last command to an input/output */ + table--; + st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST); + table++; + + /* Add the stop command to the end of the list */ + memset(table, 0, sizeof(struct dbdma_cmd)); + st_le16(&table->command, DBDMA_STOP); + + dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); +} + + +static void pata_macio_freeze(struct ata_port *ap) +{ + struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; + + if (dma_regs) { + unsigned int timeout = 1000000; + + /* Make sure DMA controller is stopped */ + writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control); + while (--timeout && (readl(&dma_regs->status) & RUN)) + udelay(1); + } + + ata_sff_freeze(ap); +} + + +static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_macio_priv *priv = ap->private_data; + struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; + int dev = qc->dev->devno; + + dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); + + /* Make sure DMA commands updates are visible */ + writel(priv->dma_table_dma, &dma_regs->cmdptr); + + /* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on + * UDMA reads + */ + if (priv->kind == controller_kl_ata4 && + (priv->treg[dev][0] & TR_66_UDMA_EN)) { + void __iomem *rbase = ap->ioaddr.cmd_addr; + u32 reg = priv->treg[dev][0]; + + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) + reg += 0x00800000; + writel(reg, rbase + IDE_TIMING_CONFIG); + } + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} + +static void pata_macio_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_macio_priv *priv = ap->private_data; + struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; + + dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); + + writel((RUN << 16) | RUN, &dma_regs->control); + /* Make sure it gets to the controller right now */ + 
(void)readl(&dma_regs->control); +} + +static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pata_macio_priv *priv = ap->private_data; + struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; + unsigned int timeout = 1000000; + + dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); + + /* Stop the DMA engine and wait for it to full halt */ + writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control); + while (--timeout && (readl(&dma_regs->status) & RUN)) + udelay(1); +} + +static u8 pata_macio_bmdma_status(struct ata_port *ap) +{ + struct pata_macio_priv *priv = ap->private_data; + struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr; + u32 dstat, rstat = ATA_DMA_INTR; + unsigned long timeout = 0; + + dstat = readl(&dma_regs->status); + + dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat); + + /* We have two things to deal with here: + * + * - The dbdma won't stop if the command was started + * but completed with an error without transferring all + * datas. This happens when bad blocks are met during + * a multi-block transfer. + * + * - The dbdma fifo hasn't yet finished flushing to + * to system memory when the disk interrupt occurs. + * + */ + + /* First check for errors */ + if ((dstat & (RUN|DEAD)) != RUN) + rstat |= ATA_DMA_ERR; + + /* If ACTIVE is cleared, the STOP command has been hit and + * the transfer is complete. If not, we have to flush the + * channel. + */ + if ((dstat & ACTIVE) == 0) + return rstat; + + dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__); + + /* If dbdma didn't execute the STOP command yet, the + * active bit is still set. We consider that we aren't + * sharing interrupts (which is hopefully the case with + * those controllers) and so we just try to flush the + * channel for pending data in the fifo + */ + udelay(1); + writel((FLUSH << 16) | FLUSH, &dma_regs->control); + for (;;) { + udelay(1); + dstat = readl(&dma_regs->status); + if ((dstat & FLUSH) == 0) + break; + if (++timeout > 1000) { + dev_warn(priv->dev, "timeout flushing DMA\n"); + rstat |= ATA_DMA_ERR; + break; + } + } + return rstat; +} + +/* port_start is when we allocate the DMA command list */ +static int pata_macio_port_start(struct ata_port *ap) +{ + struct pata_macio_priv *priv = ap->private_data; + + if (ap->ioaddr.bmdma_addr == NULL) + return 0; + + /* Allocate space for the DBDMA commands. + * + * The +2 is +1 for the stop command and +1 to allow for + * aligning the start address to a multiple of 16 bytes. + */ + priv->dma_table_cpu = + dmam_alloc_coherent(priv->dev, + (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), + &priv->dma_table_dma, GFP_KERNEL); + if (priv->dma_table_cpu == NULL) { + dev_err(priv->dev, "Unable to allocate DMA command list\n"); + ap->ioaddr.bmdma_addr = NULL; + } + return 0; +} + +static void pata_macio_irq_clear(struct ata_port *ap) +{ + struct pata_macio_priv *priv = ap->private_data; + + /* Nothing to do here */ + + dev_dbgdma(priv->dev, "%s\n", __func__); +} + +static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume) +{ + dev_dbg(priv->dev, "Enabling & resetting... \n"); + + if (priv->mediabay) + return; + + if (priv->kind == controller_ohare && !resume) { + /* The code below is having trouble on some ohare machines + * (timing related ?). 
Until I can put my hand on one of these + * units, I keep the old way + */ + ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1); + } else { + int rc; + + /* Reset and enable controller */ + rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET, + priv->node, priv->aapl_bus_id, 1); + ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, + priv->node, priv->aapl_bus_id, 1); + msleep(10); + /* Only bother waiting if there's a reset control */ + if (rc == 0) { + ppc_md.feature_call(PMAC_FTR_IDE_RESET, + priv->node, priv->aapl_bus_id, 0); + msleep(IDE_WAKEUP_DELAY_MS); + } + } + + /* If resuming a PCI device, restore the config space here */ + if (priv->pdev && resume) { + int rc; + + pci_restore_state(priv->pdev); + rc = pcim_enable_device(priv->pdev); + if (rc) + dev_printk(KERN_ERR, &priv->pdev->dev, + "Failed to enable device after resume (%d)\n", rc); + else + pci_set_master(priv->pdev); + } + + /* On Kauai, initialize the FCR. We don't perform a reset, doesn't really + * seem necessary and speeds up the boot process + */ + if (priv->kauai_fcr) + writel(KAUAI_FCR_UATA_MAGIC | + KAUAI_FCR_UATA_RESET_N | + KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr); +} + +/* Hook the standard slave config to fixup some HW related alignment + * restrictions + */ +static int pata_macio_slave_config(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct pata_macio_priv *priv = ap->private_data; + struct ata_device *dev; + u16 cmd; + int rc; + + /* First call original */ + rc = ata_scsi_slave_config(sdev); + if (rc) + return rc; + + /* This is lifted from sata_nv */ + dev = &ap->link.device[sdev->id]; + + /* OHare has issues with non cache aligned DMA on some chipsets */ + if (priv->kind == controller_ohare) { + blk_queue_update_dma_alignment(sdev->request_queue, 31); + blk_queue_update_dma_pad(sdev->request_queue, 31); + + /* Tell the world about it */ + ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n"); + return 0; + } + + /* We only have issues with ATAPI */ + if (dev->class != ATA_DEV_ATAPI) + return 0; + + /* Shasta and K2 seem to have "issues" with reads ... */ + if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) { + /* Allright these are bad, apply restrictions */ + blk_queue_update_dma_alignment(sdev->request_queue, 15); + blk_queue_update_dma_pad(sdev->request_queue, 15); + + /* We enable MWI and hack cache line size directly here, this + * is specific to this chipset and not normal values, we happen + * to somewhat know what we are doing here (which is basically + * to do the same Apple does and pray they did not get it wrong :-) + */ + BUG_ON(!priv->pdev); + pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08); + pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd); + pci_write_config_word(priv->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_INVALIDATE); + + /* Tell the world about it */ + ata_dev_printk(dev, KERN_INFO, + "K2/Shasta alignment limits applied\n"); + } + + return 0; +} + +#ifdef CONFIG_PM + +static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg) +{ + int rc; + + /* First, core libata suspend to do most of the work */ + rc = ata_host_suspend(priv->host, mesg); + if (rc) + return rc; + + /* Restore to default timings */ + pata_macio_default_timings(priv); + + /* Mask interrupt. 
Not strictly necessary but old driver did + * it and I'd rather not change that here */ + disable_irq(priv->irq); + + /* The media bay will handle itself just fine */ + if (priv->mediabay) + return 0; + + /* Kauai has bus control FCRs directly here */ + if (priv->kauai_fcr) { + u32 fcr = readl(priv->kauai_fcr); + fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE); + writel(fcr, priv->kauai_fcr); + } + + /* For PCI, save state and disable DMA. No need to call + * pci_set_power_state(), the HW doesn't do D states that + * way, the platform code will take care of suspending the + * ASIC properly + */ + if (priv->pdev) { + pci_save_state(priv->pdev); + pci_disable_device(priv->pdev); + } + + /* Disable the bus on older machines and the cell on kauai */ + ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, + priv->aapl_bus_id, 0); + + return 0; +} + +static int pata_macio_do_resume(struct pata_macio_priv *priv) +{ + /* Reset and re-enable the HW */ + pata_macio_reset_hw(priv, 1); + + /* Sanitize drive timings */ + pata_macio_apply_timings(priv->host->ports[0], 0); + + /* We want our IRQ back ! */ + enable_irq(priv->irq); + + /* Let the libata core take it from there */ + ata_host_resume(priv->host); + + return 0; +} + +#endif /* CONFIG_PM */ + +static struct scsi_host_template pata_macio_sht = { + ATA_BASE_SHT(DRV_NAME), + .sg_tablesize = MAX_DCMDS, + /* We may not need that strict one */ + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = pata_macio_slave_config, +}; + +static struct ata_port_operations pata_macio_ops = { + .inherits = &ata_sff_port_ops, + + .freeze = pata_macio_freeze, + .set_piomode = pata_macio_set_timings, + .set_dmamode = pata_macio_set_timings, + .cable_detect = pata_macio_cable_detect, + .sff_dev_select = pata_macio_dev_select, + .qc_prep = pata_macio_qc_prep, + .mode_filter = ata_bmdma_mode_filter, + .bmdma_setup = pata_macio_bmdma_setup, + .bmdma_start = pata_macio_bmdma_start, + .bmdma_stop = pata_macio_bmdma_stop, + .bmdma_status = pata_macio_bmdma_status, + .port_start = pata_macio_port_start, + .sff_irq_clear = pata_macio_irq_clear, +}; + +static void __devinit pata_macio_invariants(struct pata_macio_priv *priv) +{ + const int *bidp; + + /* Identify the type of controller */ + if (of_device_is_compatible(priv->node, "shasta-ata")) { + priv->kind = controller_sh_ata6; + priv->timings = pata_macio_shasta_timings; + } else if (of_device_is_compatible(priv->node, "kauai-ata")) { + priv->kind = controller_un_ata6; + priv->timings = pata_macio_kauai_timings; + } else if (of_device_is_compatible(priv->node, "K2-UATA")) { + priv->kind = controller_k2_ata6; + priv->timings = pata_macio_kauai_timings; + } else if (of_device_is_compatible(priv->node, "keylargo-ata")) { + if (strcmp(priv->node->name, "ata-4") == 0) { + priv->kind = controller_kl_ata4; + priv->timings = pata_macio_kl66_timings; + } else { + priv->kind = controller_kl_ata3; + priv->timings = pata_macio_kl33_timings; + } + } else if (of_device_is_compatible(priv->node, "heathrow-ata")) { + priv->kind = controller_heathrow; + priv->timings = pata_macio_heathrow_timings; + } else { + priv->kind = controller_ohare; + priv->timings = pata_macio_ohare_timings; + } + + /* XXX FIXME --- setup priv->mediabay here */ + + /* Get Apple bus ID (for clock and ASIC control) */ + bidp = of_get_property(priv->node, "AAPL,bus-id", NULL); + priv->aapl_bus_id = bidp ? 
*bidp : 0; + + /* Fixup missing Apple bus ID in case of media-bay */ + if (priv->mediabay && bidp == 0) + priv->aapl_bus_id = 1; +} + +static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr, + void __iomem * base, + void __iomem * dma) +{ + /* cmd_addr is the base of regs for that port */ + ioaddr->cmd_addr = base; + + /* taskfile registers */ + ioaddr->data_addr = base + (ATA_REG_DATA << 4); + ioaddr->error_addr = base + (ATA_REG_ERR << 4); + ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4); + ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4); + ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4); + ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4); + ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4); + ioaddr->device_addr = base + (ATA_REG_DEVICE << 4); + ioaddr->status_addr = base + (ATA_REG_STATUS << 4); + ioaddr->command_addr = base + (ATA_REG_CMD << 4); + ioaddr->altstatus_addr = base + 0x160; + ioaddr->ctl_addr = base + 0x160; + ioaddr->bmdma_addr = dma; +} + +static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv, + struct ata_port_info *pinfo) +{ + int i = 0; + + pinfo->pio_mask = 0; + pinfo->mwdma_mask = 0; + pinfo->udma_mask = 0; + + while (priv->timings[i].mode > 0) { + unsigned int mask = 1U << (priv->timings[i].mode & 0x0f); + switch(priv->timings[i].mode & 0xf0) { + case 0x00: /* PIO */ + pinfo->pio_mask |= (mask >> 8); + break; + case 0x20: /* MWDMA */ + pinfo->mwdma_mask |= mask; + break; + case 0x40: /* UDMA */ + pinfo->udma_mask |= mask; + break; + } + i++; + } + dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n", + pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask); +} + +static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, + resource_size_t tfregs, + resource_size_t dmaregs, + resource_size_t fcregs, + unsigned long irq) +{ + struct ata_port_info pinfo; + const struct ata_port_info *ppi[] = { &pinfo, NULL }; + void __iomem *dma_regs = NULL; + + /* Fill up privates with various invariants collected from the + * device-tree + */ + pata_macio_invariants(priv); + + /* Make sure we have sane initial timings in the cache */ + pata_macio_default_timings(priv); + + /* Not sure what the real max is but we know it's less than 64K, let's + * use 64K minus 256 + */ + dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG); + + /* Allocate libata host for 1 port */ + memset(&pinfo, 0, sizeof(struct ata_port_info)); + pmac_macio_calc_timing_masks(priv, &pinfo); + pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | + ATA_FLAG_NO_LEGACY; + pinfo.port_ops = &pata_macio_ops; + pinfo.private_data = priv; + + priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1); + if (priv->host == NULL) { + dev_err(priv->dev, "Failed to allocate ATA port structure\n"); + return -ENOMEM; + } + + /* Setup the private data in host too */ + priv->host->private_data = priv; + + /* Map base registers */ + priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100); + if (priv->tfregs == NULL) { + dev_err(priv->dev, "Failed to map ATA ports\n"); + return -ENOMEM; + } + priv->host->iomap = &priv->tfregs; + + /* Map DMA regs */ + if (dmaregs != 0) { + dma_regs = devm_ioremap(priv->dev, dmaregs, + sizeof(struct dbdma_regs)); + if (dma_regs == NULL) + dev_warn(priv->dev, "Failed to map ATA DMA registers\n"); + } + + /* If chip has local feature control, map those regs too */ + if (fcregs != 0) { + priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4); + if (priv->kauai_fcr == NULL) { + dev_err(priv->dev, "Failed to map ATA FCR register\n"); + return -ENOMEM; 
+ } + } + + /* Setup port data structure */ + pata_macio_setup_ios(&priv->host->ports[0]->ioaddr, + priv->tfregs, dma_regs); + priv->host->ports[0]->private_data = priv; + + /* hard-reset the controller */ + pata_macio_reset_hw(priv, 0); + pata_macio_apply_timings(priv->host->ports[0], 0); + + /* Enable bus master if necessary */ + if (priv->pdev && dma_regs) + pci_set_master(priv->pdev); + + dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n", + macio_ata_names[priv->kind], priv->aapl_bus_id); + + /* Start it up */ + priv->irq = irq; + return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, + &pata_macio_sht); +} + +static int __devinit pata_macio_attach(struct macio_dev *mdev, + const struct of_device_id *match) +{ + struct pata_macio_priv *priv; + resource_size_t tfregs, dmaregs = 0; + unsigned long irq; + int rc; + + /* Check for broken device-trees */ + if (macio_resource_count(mdev) == 0) { + dev_err(&mdev->ofdev.dev, + "No addresses for controller\n"); + return -ENXIO; + } + + /* Enable managed resources */ + macio_enable_devres(mdev); + + /* Allocate and init private data structure */ + priv = devm_kzalloc(&mdev->ofdev.dev, + sizeof(struct pata_macio_priv), GFP_KERNEL); + if (priv == NULL) { + dev_err(&mdev->ofdev.dev, + "Failed to allocate private memory\n"); + return -ENOMEM; + } + priv->node = of_node_get(mdev->ofdev.node); + priv->mdev = mdev; + priv->dev = &mdev->ofdev.dev; + + /* Request memory resource for taskfile registers */ + if (macio_request_resource(mdev, 0, "pata-macio")) { + dev_err(&mdev->ofdev.dev, + "Cannot obtain taskfile resource\n"); + return -EBUSY; + } + tfregs = macio_resource_start(mdev, 0); + + /* Request resources for DMA registers if any */ + if (macio_resource_count(mdev) >= 2) { + if (macio_request_resource(mdev, 1, "pata-macio-dma")) + dev_err(&mdev->ofdev.dev, + "Cannot obtain DMA resource\n"); + else + dmaregs = macio_resource_start(mdev, 1); + } + + /* + * Fixup missing IRQ for some old implementations with broken + * device-trees. + * + * This is a bit bogus, it should be fixed in the device-tree itself, + * via the existing macio fixups, based on the type of interrupt + * controller in the machine. However, I have no test HW for this case, + * and this trick works well enough on those old machines... 
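+	 * (As far as I can tell, 13 is the fallback IDE interrupt the old
+	 * drivers/ide pmac driver used on these machines; treat the exact
+	 * number as inherited rather than documented.)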
+ */ + if (macio_irq_count(mdev) == 0) { + dev_warn(&mdev->ofdev.dev, + "No interrupts for controller, using 13\n"); + irq = irq_create_mapping(NULL, 13); + } else + irq = macio_irq(mdev, 0); + + /* Prevvent media bay callbacks until fully registered */ + lock_media_bay(priv->mdev->media_bay); + + /* Get register addresses and call common initialization */ + rc = pata_macio_common_init(priv, + tfregs, /* Taskfile regs */ + dmaregs, /* DBDMA regs */ + 0, /* Feature control */ + irq); + unlock_media_bay(priv->mdev->media_bay); + + return rc; +} + +static int __devexit pata_macio_detach(struct macio_dev *mdev) +{ + struct ata_host *host = macio_get_drvdata(mdev); + struct pata_macio_priv *priv = host->private_data; + + lock_media_bay(priv->mdev->media_bay); + + /* Make sure the mediabay callback doesn't try to access + * dead stuff + */ + priv->host->private_data = NULL; + + ata_host_detach(host); + + unlock_media_bay(priv->mdev->media_bay); + + return 0; +} + +#ifdef CONFIG_PM + +static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) +{ + struct ata_host *host = macio_get_drvdata(mdev); + + return pata_macio_do_suspend(host->private_data, mesg); +} + +static int pata_macio_resume(struct macio_dev *mdev) +{ + struct ata_host *host = macio_get_drvdata(mdev); + + return pata_macio_do_resume(host->private_data); +} + +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PMAC_MEDIABAY +static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state) +{ + struct ata_host *host = macio_get_drvdata(mdev); + struct ata_port *ap; + struct ata_eh_info *ehi; + struct ata_device *dev; + unsigned long flags; + + if (!host || !host->private_data) + return; + ap = host->ports[0]; + spin_lock_irqsave(ap->lock, flags); + ehi = &ap->link.eh_info; + if (mb_state == MB_CD) { + ata_ehi_push_desc(ehi, "mediabay plug"); + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); + } else { + ata_ehi_push_desc(ehi, "mediabay unplug"); + ata_for_each_dev(dev, &ap->link, ALL) + dev->flags |= ATA_DFLAG_DETACH; + ata_port_abort(ap); + } + spin_unlock_irqrestore(ap->lock, flags); + +} +#endif /* CONFIG_PMAC_MEDIABAY */ + + +static int __devinit pata_macio_pci_attach(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pata_macio_priv *priv; + struct device_node *np; + resource_size_t rbase; + + /* We cannot use a MacIO controller without its OF device node */ + np = pci_device_to_OF_node(pdev); + if (np == NULL) { + dev_err(&pdev->dev, + "Cannot find OF device node for controller\n"); + return -ENODEV; + } + + /* Check that it can be enabled */ + if (pcim_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot enable controller PCI device\n"); + return -ENXIO; + } + + /* Allocate and init private data structure */ + priv = devm_kzalloc(&pdev->dev, + sizeof(struct pata_macio_priv), GFP_KERNEL); + if (priv == NULL) { + dev_err(&pdev->dev, + "Failed to allocate private memory\n"); + return -ENOMEM; + } + priv->node = of_node_get(np); + priv->pdev = pdev; + priv->dev = &pdev->dev; + + /* Get MMIO regions */ + if (pci_request_regions(pdev, "pata-macio")) { + dev_err(&pdev->dev, + "Cannot obtain PCI resources\n"); + return -EBUSY; + } + + /* Get register addresses and call common initialization */ + rbase = pci_resource_start(pdev, 0); + if (pata_macio_common_init(priv, + rbase + 0x2000, /* Taskfile regs */ + rbase + 0x1000, /* DBDMA regs */ + rbase, /* Feature control */ + pdev->irq)) + return -ENXIO; + + return 0; +} + +static void __devexit pata_macio_pci_detach(struct pci_dev *pdev) +{ + struct ata_host *host 
= dev_get_drvdata(&pdev->dev); + + ata_host_detach(host); +} + +#ifdef CONFIG_PM + +static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + + return pata_macio_do_suspend(host->private_data, mesg); +} + +static int pata_macio_pci_resume(struct pci_dev *pdev) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + + return pata_macio_do_resume(host->private_data); +} + +#endif /* CONFIG_PM */ + +static struct of_device_id pata_macio_match[] = +{ + { + .name = "IDE", + }, + { + .name = "ATA", + }, + { + .type = "ide", + }, + { + .type = "ata", + }, + {}, +}; + +static struct macio_driver pata_macio_driver = +{ + .name = "pata-macio", + .match_table = pata_macio_match, + .probe = pata_macio_attach, + .remove = pata_macio_detach, +#ifdef CONFIG_PM + .suspend = pata_macio_suspend, + .resume = pata_macio_resume, +#endif +#ifdef CONFIG_PMAC_MEDIABAY + .mediabay_event = pata_macio_mb_event, +#endif + .driver = { + .owner = THIS_MODULE, + }, +}; + +static const struct pci_device_id pata_macio_pci_match[] = { + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 }, + { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 }, + {}, +}; + +static struct pci_driver pata_macio_pci_driver = { + .name = "pata-pci-macio", + .id_table = pata_macio_pci_match, + .probe = pata_macio_pci_attach, + .remove = pata_macio_pci_detach, +#ifdef CONFIG_PM + .suspend = pata_macio_pci_suspend, + .resume = pata_macio_pci_resume, +#endif + .driver = { + .owner = THIS_MODULE, + }, +}; +MODULE_DEVICE_TABLE(pci, pata_macio_pci_match); + + +static int __init pata_macio_init(void) +{ + int rc; + + if (!machine_is(powermac)) + return -ENODEV; + + rc = pci_register_driver(&pata_macio_pci_driver); + if (rc) + return rc; + rc = macio_register_driver(&pata_macio_driver); + if (rc) { + pci_unregister_driver(&pata_macio_pci_driver); + return rc; + } + return 0; +} + +static void __exit pata_macio_exit(void) +{ + macio_unregister_driver(&pata_macio_driver); + pci_unregister_driver(&pata_macio_pci_driver); +} + +module_init(pata_macio_init); +module_exit(pata_macio_exit); + +MODULE_AUTHOR("Benjamin Herrenschmidt"); +MODULE_DESCRIPTION("Apple MacIO PATA driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index e62a4ccea54d..27fd775375b0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut struct cpu *cpu = container_of(dev, struct cpu, sysdev); ssize_t ret; + cpu_hotplug_driver_lock(); switch (buf[0]) { case '0': ret = cpu_down(cpu->sysdev.id); @@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut default: ret = -EINVAL; } + cpu_hotplug_driver_unlock(); if (ret >= 0) ret = count; @@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu) per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; } + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +static ssize_t cpu_probe_store(struct class *class, const char *buf, + size_t count) +{ + return arch_cpu_probe(buf, count); +} + +static ssize_t cpu_release_store(struct class *class, const char *buf, + size_t count) +{ + return arch_cpu_release(buf, count); +} + +static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static 
CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); + +int __init cpu_probe_release_init(void) +{ + int rc; + + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &class_attr_probe.attr); + if (!rc) + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &class_attr_release.attr); + + return rc; +} +device_initcall(cpu_probe_release_init); +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ + #else /* ... !CONFIG_HOTPLUG_CPU */ static inline void register_cpu_control(struct cpu *cpu) { diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 6380ad8d91bd..59ca2b77b574 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -200,7 +200,7 @@ struct floppy_state { int ejected; wait_queue_head_t wait; int wanted; - struct device_node* media_bay; /* NULL when not in bay */ + struct macio_dev *mdev; char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; }; @@ -303,14 +303,13 @@ static int swim3_readbit(struct floppy_state *fs, int bit) static void do_fd_request(struct request_queue * q) { int i; - for(i=0;i<floppy_count;i++) - { -#ifdef CONFIG_PMAC_MEDIABAY - if (floppy_states[i].media_bay && - check_media_bay(floppy_states[i].media_bay, MB_FD)) + + for(i=0; i<floppy_count; i++) { + struct floppy_state *fs = &floppy_states[i]; + if (fs->mdev->media_bay && + check_media_bay(fs->mdev->media_bay) != MB_FD) continue; -#endif /* CONFIG_PMAC_MEDIABAY */ - start_request(&floppy_states[i]); + start_request(fs); } } @@ -849,10 +848,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) return -EPERM; -#ifdef CONFIG_PMAC_MEDIABAY - if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + if (fs->mdev->media_bay && + check_media_bay(fs->mdev->media_bay) != MB_FD) return -ENXIO; -#endif switch (cmd) { case FDEJECT: @@ -876,10 +874,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) int n, err = 0; if (fs->ref_count == 0) { -#ifdef CONFIG_PMAC_MEDIABAY - if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + if (fs->mdev->media_bay && + check_media_bay(fs->mdev->media_bay) != MB_FD) return -ENXIO; -#endif out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); out_8(&sw->control_bic, 0xff); out_8(&sw->mode, 0x95); @@ -963,10 +960,9 @@ static int floppy_revalidate(struct gendisk *disk) struct swim3 __iomem *sw; int ret, n; -#ifdef CONFIG_PMAC_MEDIABAY - if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + if (fs->mdev->media_bay && + check_media_bay(fs->mdev->media_bay) != MB_FD) return -ENXIO; -#endif sw = fs->swim3; grab_drive(fs, revalidating, 0); @@ -1009,7 +1005,6 @@ static const struct block_device_operations floppy_fops = { static int swim3_add_device(struct macio_dev *mdev, int index) { struct device_node *swim = mdev->ofdev.node; - struct device_node *mediabay; struct floppy_state *fs = &floppy_states[index]; int rc = -EBUSY; @@ -1036,9 +1031,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) } dev_set_drvdata(&mdev->ofdev.dev, fs); - mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? 
- swim->parent : NULL; - if (mediabay == NULL) + if (mdev->media_bay == NULL) pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); memset(fs, 0, sizeof(*fs)); @@ -1068,7 +1061,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) fs->secpercyl = 36; fs->secpertrack = 18; fs->total_secs = 2880; - fs->media_bay = mediabay; + fs->mdev = mdev; init_waitqueue_head(&fs->wait); fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); @@ -1093,7 +1086,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index) init_timer(&fs->timeout); printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, - mediabay ? "in media bay" : ""); + mdev->media_bay ? "in media bay" : ""); return 0; diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 703959eba45a..d89da4ac061f 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -144,16 +144,13 @@ static int uninorth_configure(void) return 0; } -static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, - int type) +static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { - int i, j, num_entries; + int i, num_entries; void *temp; + u32 *gp; int mask_type; - temp = agp_bridge->current_size; - num_entries = A_SIZE_32(temp)->num_entries; - if (type != mem->type) return -EINVAL; @@ -163,49 +160,12 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, return -EINVAL; } - if ((pg_start + mem->page_count) > num_entries) - return -EINVAL; - - j = pg_start; - - while (j < (pg_start + mem->page_count)) { - if (agp_bridge->gatt_table[j]) - return -EBUSY; - j++; - } - - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge->gatt_table[j] = - cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL); - flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), - (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); - } - (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]); - mb(); - - uninorth_tlbflush(mem); - return 0; -} - -static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) -{ - int i, num_entries; - void *temp; - u32 *gp; - int mask_type; + if (mem->page_count == 0) + return 0; temp = agp_bridge->current_size; num_entries = A_SIZE_32(temp)->num_entries; - if (type != mem->type) - return -EINVAL; - - mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - if (mask_type != 0) { - /* We know nothing of memory types */ - return -EINVAL; - } - if ((pg_start + mem->page_count) > num_entries) return -EINVAL; @@ -213,14 +173,18 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) for (i = 0; i < mem->page_count; ++i) { if (gp[i]) { dev_info(&agp_bridge->dev->dev, - "u3_insert_memory: entry 0x%x occupied (%x)\n", + "uninorth_insert_memory: entry 0x%x occupied (%x)\n", i, gp[i]); return -EBUSY; } } for (i = 0; i < mem->page_count; i++) { - gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; + if (is_u3) + gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; + else + gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | + 0x1UL); flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); } @@ -230,14 +194,23 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) return 0; } -int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +int 
uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; u32 *gp; + int mask_type; + + if (type != mem->type) + return -EINVAL; - if (type != 0 || mem->type != 0) + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (mask_type != 0) { /* We know nothing of memory types */ return -EINVAL; + } + + if (mem->page_count == 0) + return 0; gp = (u32 *) &agp_bridge->gatt_table[pg_start]; for (i = 0; i < mem->page_count; ++i) @@ -536,7 +509,7 @@ const struct agp_bridge_driver uninorth_agp_driver = { .create_gatt_table = uninorth_create_gatt_table, .free_gatt_table = uninorth_free_gatt_table, .insert_memory = uninorth_insert_memory, - .remove_memory = agp_generic_remove_memory, + .remove_memory = uninorth_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, @@ -562,8 +535,8 @@ const struct agp_bridge_driver u3_agp_driver = { .agp_enable = uninorth_agp_enable, .create_gatt_table = uninorth_create_gatt_table, .free_gatt_table = uninorth_free_gatt_table, - .insert_memory = u3_insert_memory, - .remove_memory = u3_remove_memory, + .insert_memory = uninorth_insert_memory, + .remove_memory = uninorth_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index a632f25f144a..416d3423150d 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -832,6 +832,7 @@ int hvc_remove(struct hvc_struct *hp) tty_hangup(tty); return 0; } +EXPORT_SYMBOL_GPL(hvc_remove); /* Driver initialization: called as soon as someone uses hvc_alloc(). */ static int hvc_init(void) diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 97642a7a79c4..7a4e788cab2f 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -43,10 +43,7 @@ #include <asm/pmac_feature.h> #include <asm/sections.h> #include <asm/irq.h> - -#ifndef CONFIG_PPC64 #include <asm/mediabay.h> -#endif #define DRV_NAME "ide-pmac" @@ -59,13 +56,14 @@ typedef struct pmac_ide_hwif { int irq; int kind; int aapl_bus_id; - unsigned mediabay : 1; unsigned broken_dma : 1; unsigned broken_dma_warn : 1; struct device_node* node; struct macio_dev *mdev; u32 timings[4]; volatile u32 __iomem * *kauai_fcr; + ide_hwif_t *hwif; + /* Those fields are duplicating what is in hwif. We currently * can't use the hwif ones because of some assumptions that are * beeing done by the generic code about the kind of dma controller @@ -854,6 +852,11 @@ sanitize_timings(pmac_ide_hwif_t *pmif) pmif->timings[2] = pmif->timings[3] = value2; } +static int on_media_bay(pmac_ide_hwif_t *pmif) +{ + return pmif->mdev && pmif->mdev->media_bay != NULL; +} + /* Suspend call back, should be called after the child devices * have actually been suspended */ @@ -866,7 +869,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) disable_irq(pmif->irq); /* The media bay will handle itself just fine */ - if (pmif->mediabay) + if (on_media_bay(pmif)) return 0; /* Kauai has bus control FCRs directly here */ @@ -889,7 +892,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) { /* Hard reset & re-enable controller (do we really need to reset ? 
-BenH) */ - if (!pmif->mediabay) { + if (!on_media_bay(pmif)) { ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); msleep(10); @@ -950,13 +953,11 @@ static void pmac_ide_init_dev(ide_drive_t *drive) pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); - if (pmif->mediabay) { -#ifdef CONFIG_PMAC_MEDIABAY - if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) { + if (on_media_bay(pmif)) { + if (check_media_bay(pmif->mdev->media_bay) == MB_CD) { drive->dev_flags &= ~IDE_DFLAG_NOPROBE; return; } -#endif drive->dev_flags |= IDE_DFLAG_NOPROBE; } } @@ -1072,26 +1073,23 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, writel(KAUAI_FCR_UATA_MAGIC | KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); - - pmif->mediabay = 0; /* Make sure we have sane timings */ sanitize_timings(pmif); + /* If we are on a media bay, wait for it to settle and lock it */ + if (pmif->mdev) + lock_media_bay(pmif->mdev->media_bay); + host = ide_host_alloc(&d, hws, 1); - if (host == NULL) - return -ENOMEM; - hwif = host->ports[0]; + if (host == NULL) { + rc = -ENOMEM; + goto bail; + } + hwif = pmif->hwif = host->ports[0]; -#ifndef CONFIG_PPC64 - /* XXX FIXME: Media bay stuff need re-organizing */ - if (np->parent && np->parent->name - && strcasecmp(np->parent->name, "media-bay") == 0) { -#ifdef CONFIG_PMAC_MEDIABAY - media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, - hwif); -#endif /* CONFIG_PMAC_MEDIABAY */ - pmif->mediabay = 1; + if (on_media_bay(pmif)) { + /* Fixup bus ID for media bay */ if (!bidp) pmif->aapl_bus_id = 1; } else if (pmif->kind == controller_ohare) { @@ -1100,9 +1098,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, * units, I keep the old way */ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); - } else -#endif - { + } else { /* This is necessary to enable IDE when net-booting */ ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); @@ -1112,17 +1108,21 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, } printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " - "bus ID %d%s, irq %d\n", model_name[pmif->kind], - pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, - pmif->mediabay ? " (mediabay)" : "", hw->irq); + "bus ID %d%s, irq %d\n", model_name[pmif->kind], + pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, + on_media_bay(pmif) ? 
" (mediabay)" : "", hw->irq); rc = ide_host_register(host, &d, hws); - if (rc) { - ide_host_free(host); - return rc; - } + if (rc) + pmif->hwif = NULL; - return 0; + if (pmif->mdev) + unlock_media_bay(pmif->mdev->media_bay); + + bail: + if (rc && host) + ide_host_free(host); + return rc; } static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) @@ -1362,6 +1362,25 @@ pmac_ide_pci_resume(struct pci_dev *pdev) return rc; } +#ifdef CONFIG_PMAC_MEDIABAY +static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state) +{ + pmac_ide_hwif_t *pmif = + (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); + + switch(mb_state) { + case MB_CD: + if (!pmif->hwif->present) + ide_port_scan(pmif->hwif); + break; + default: + if (pmif->hwif->present) + ide_port_unregister_devices(pmif->hwif); + } +} +#endif /* CONFIG_PMAC_MEDIABAY */ + + static struct of_device_id pmac_ide_macio_match[] = { { @@ -1386,6 +1405,9 @@ static struct macio_driver pmac_ide_macio_driver = .probe = pmac_ide_macio_attach, .suspend = pmac_ide_macio_suspend, .resume = pmac_ide_macio_resume, +#ifdef CONFIG_PMAC_MEDIABAY + .mediabay_event = pmac_ide_macio_mb_event, +#endif }; static const struct pci_device_id pmac_ide_pci_match[] = { diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 588a5b0bc4b5..26a303a1d1ab 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -379,6 +379,11 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dev->ofdev.dev.parent = parent; dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; + dev->ofdev.dev.dma_parms = &dev->dma_parms; + + /* Standard DMA paremeters */ + dma_set_max_seg_size(&dev->ofdev.dev, 65536); + dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); #ifdef CONFIG_PCI /* Set the DMA ops to the ones from the PCI device, this could be @@ -538,6 +543,42 @@ void macio_unregister_driver(struct macio_driver *drv) driver_unregister(&drv->driver); } +/* Managed MacIO resources */ +struct macio_devres { + u32 res_mask; +}; + +static void maciom_release(struct device *gendev, void *res) +{ + struct macio_dev *dev = to_macio_device(gendev); + struct macio_devres *dr = res; + int i, max; + + max = min(dev->n_resources, 32); + for (i = 0; i < max; i++) { + if (dr->res_mask & (1 << i)) + macio_release_resource(dev, i); + } +} + +int macio_enable_devres(struct macio_dev *dev) +{ + struct macio_devres *dr; + + dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); + if (!dr) { + dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + } + return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL; +} + +static struct macio_devres * find_macio_dr(struct macio_dev *dev) +{ + return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL); +} + /** * macio_request_resource - Request an MMIO resource * @dev: pointer to the device holding the resource @@ -555,6 +596,8 @@ void macio_unregister_driver(struct macio_driver *drv) int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name) { + struct macio_devres *dr = find_macio_dr(dev); + if (macio_resource_len(dev, resource_no) == 0) return 0; @@ -562,6 +605,9 @@ int macio_request_resource(struct macio_dev *dev, int resource_no, macio_resource_len(dev, resource_no), name)) goto err_out; + + if (dr && resource_no < 32) + dr->res_mask |= 1 << resource_no; return 0; @@ -582,10 +628,14 @@ err_out: */ void macio_release_resource(struct macio_dev *dev, int 
resource_no) { + struct macio_devres *dr = find_macio_dr(dev); + if (macio_resource_len(dev, resource_no) == 0) return; release_mem_region(macio_resource_start(dev, resource_no), macio_resource_len(dev, resource_no)); + if (dr && resource_no < 32) + dr->res_mask &= ~(1 << resource_no); } /** @@ -744,3 +794,5 @@ EXPORT_SYMBOL(macio_request_resource); EXPORT_SYMBOL(macio_release_resource); EXPORT_SYMBOL(macio_request_resources); EXPORT_SYMBOL(macio_release_resources); +EXPORT_SYMBOL(macio_enable_devres); + diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 029ad8ce8a7e..08002b88f342 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -33,15 +33,6 @@ #include <linux/adb.h> #include <linux/pmu.h> - -#define MB_DEBUG - -#ifdef MB_DEBUG -#define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg) -#else -#define MBDBG(fmt, arg...) do { } while (0) -#endif - #define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2)) #define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r)) @@ -76,28 +67,14 @@ struct media_bay_info { int index; int cached_gpio; int sleeping; + int user_lock; struct mutex lock; -#ifdef CONFIG_BLK_DEV_IDE_PMAC - ide_hwif_t *cd_port; - void __iomem *cd_base; - int cd_irq; - int cd_retry; -#endif -#if defined(CONFIG_BLK_DEV_IDE_PMAC) - int cd_index; -#endif }; #define MAX_BAYS 2 static struct media_bay_info media_bays[MAX_BAYS]; -int media_bay_count = 0; - -#ifdef CONFIG_BLK_DEV_IDE_PMAC -/* check the busy bit in the media-bay ide interface - (assumes the media-bay contains an ide device) */ -#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0) -#endif +static int media_bay_count = 0; /* * Wait that number of ms between each step in normal polling mode @@ -130,21 +107,11 @@ int media_bay_count = 0; /* * Wait this many ticks after an IDE device (e.g. 
CD-ROM) is inserted - * (or until the device is ready) before waiting for busy bit to disappear + * (or until the device is ready) before calling into the driver */ #define MB_IDE_WAIT 1000 /* - * Timeout waiting for busy bit of an IDE device to go down - */ -#define MB_IDE_TIMEOUT 5000 - -/* - * Max retries of the full power up/down sequence for an IDE device - */ -#define MAX_CD_RETRIES 3 - -/* * States of a media bay */ enum { @@ -153,7 +120,6 @@ enum { mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ mb_ide_resetting, /* IDE reset bit unser, waiting MB_IDE_WAIT */ - mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */ mb_up, /* Media bay full */ mb_powering_down /* Powering down (avoid too fast down/up) */ }; @@ -373,12 +339,12 @@ static inline void set_mb_power(struct media_bay_info* bay, int onoff) if (onoff) { bay->ops->power(bay, 1); bay->state = mb_powering_up; - MBDBG("mediabay%d: powering up\n", bay->index); + pr_debug("mediabay%d: powering up\n", bay->index); } else { /* Make sure everything is powered down & disabled */ bay->ops->power(bay, 0); bay->state = mb_powering_down; - MBDBG("mediabay%d: powering down\n", bay->index); + pr_debug("mediabay%d: powering down\n", bay->index); } bay->timer = msecs_to_jiffies(MB_POWER_DELAY); } @@ -387,107 +353,118 @@ static void poll_media_bay(struct media_bay_info* bay) { int id = bay->ops->content(bay); - if (id == bay->last_value) { - if (id != bay->content_id) { - bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); - if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { - /* If the device type changes without going thru - * "MB_NO", we force a pass by "MB_NO" to make sure - * things are properly reset - */ - if ((id != MB_NO) && (bay->content_id != MB_NO)) { - id = MB_NO; - MBDBG("mediabay%d: forcing MB_NO\n", bay->index); - } - MBDBG("mediabay%d: switching to %d\n", bay->index, id); - set_mb_power(bay, id != MB_NO); - bay->content_id = id; - if (id == MB_NO) { -#ifdef CONFIG_BLK_DEV_IDE_PMAC - bay->cd_retry = 0; -#endif - printk(KERN_INFO "media bay %d is empty\n", bay->index); - } - } - } - } else { + static char *mb_content_types[] = { + "a floppy drive", + "a floppy drive", + "an unsuported audio device", + "an ATA device", + "an unsupported PCI device", + "an unknown device", + }; + + if (id != bay->last_value) { bay->last_value = id; bay->value_count = 0; + return; + } + if (id == bay->content_id) + return; + + bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); + if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { + /* If the device type changes without going thru + * "MB_NO", we force a pass by "MB_NO" to make sure + * things are properly reset + */ + if ((id != MB_NO) && (bay->content_id != MB_NO)) { + id = MB_NO; + pr_debug("mediabay%d: forcing MB_NO\n", bay->index); + } + pr_debug("mediabay%d: switching to %d\n", bay->index, id); + set_mb_power(bay, id != MB_NO); + bay->content_id = id; + if (id >= MB_NO || id < 0) + printk(KERN_INFO "mediabay%d: Bay is now empty\n", bay->index); + else + printk(KERN_INFO "mediabay%d: Bay contains %s\n", + bay->index, mb_content_types[id]); } } -#ifdef CONFIG_BLK_DEV_IDE_PMAC -int check_media_bay(struct device_node *which_bay, int what) +int check_media_bay(struct macio_dev *baydev) { - int i; + struct media_bay_info* bay; + int id; - for (i=0; i<media_bay_count; i++) - if (media_bays[i].mdev && which_bay == media_bays[i].mdev->ofdev.node) { - if ((what == 
media_bays[i].content_id) && media_bays[i].state == mb_up) - return 0; - media_bays[i].cd_index = -1; - return -EINVAL; - } - return -ENODEV; + if (baydev == NULL) + return MB_NO; + + /* This returns an instant snapshot, not locking, sine + * we may be called with the bay lock held. The resulting + * fuzzyness of the result if called at the wrong time is + * not actually a huge deal + */ + bay = macio_get_drvdata(baydev); + if (bay == NULL) + return MB_NO; + id = bay->content_id; + if (bay->state != mb_up) + return MB_NO; + if (id == MB_FD1) + return MB_FD; + return id; } -EXPORT_SYMBOL(check_media_bay); +EXPORT_SYMBOL_GPL(check_media_bay); -int check_media_bay_by_base(unsigned long base, int what) +void lock_media_bay(struct macio_dev *baydev) { - int i; - - for (i=0; i<media_bay_count; i++) - if (media_bays[i].mdev && base == (unsigned long) media_bays[i].cd_base) { - if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up) - return 0; - media_bays[i].cd_index = -1; - return -EINVAL; - } + struct media_bay_info* bay; - return -ENODEV; + if (baydev == NULL) + return; + bay = macio_get_drvdata(baydev); + if (bay == NULL) + return; + mutex_lock(&bay->lock); + bay->user_lock = 1; } -EXPORT_SYMBOL_GPL(check_media_bay_by_base); +EXPORT_SYMBOL_GPL(lock_media_bay); -int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, - int irq, ide_hwif_t *hwif) +void unlock_media_bay(struct macio_dev *baydev) { - int i; + struct media_bay_info* bay; - for (i=0; i<media_bay_count; i++) { - struct media_bay_info* bay = &media_bays[i]; - - if (bay->mdev && which_bay == bay->mdev->ofdev.node) { - int timeout = 5000, index = hwif->index; - - mutex_lock(&bay->lock); - - bay->cd_port = hwif; - bay->cd_base = (void __iomem *) base; - bay->cd_irq = irq; - - if ((MB_CD != bay->content_id) || bay->state != mb_up) { - mutex_unlock(&bay->lock); - return 0; - } - printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i); - do { - if (MB_IDE_READY(i)) { - bay->cd_index = index; - mutex_unlock(&bay->lock); - return 0; - } - mdelay(1); - } while(--timeout); - printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i); - mutex_unlock(&bay->lock); - return -ENODEV; - } + if (baydev == NULL) + return; + bay = macio_get_drvdata(baydev); + if (bay == NULL) + return; + if (bay->user_lock) { + bay->user_lock = 0; + mutex_unlock(&bay->lock); } +} +EXPORT_SYMBOL_GPL(unlock_media_bay); - return -ENODEV; +static int mb_broadcast_hotplug(struct device *dev, void *data) +{ + struct media_bay_info* bay = data; + struct macio_dev *mdev; + struct macio_driver *drv; + int state; + + if (dev->bus != &macio_bus_type) + return 0; + + state = bay->state == mb_up ? 
bay->content_id : MB_NO; + if (state == MB_FD1) + state = MB_FD; + mdev = to_macio_device(dev); + drv = to_macio_driver(dev->driver); + if (dev->driver && drv->mediabay_event) + drv->mediabay_event(mdev, state); + return 0; } -EXPORT_SYMBOL_GPL(media_bay_set_ide_infos); -#endif /* CONFIG_BLK_DEV_IDE_PMAC */ static void media_bay_step(int i) { @@ -497,8 +474,8 @@ static void media_bay_step(int i) if (bay->state != mb_powering_down) poll_media_bay(bay); - /* If timer expired or polling IDE busy, run state machine */ - if ((bay->state != mb_ide_waiting) && (bay->timer != 0)) { + /* If timer expired run state machine */ + if (bay->timer != 0) { bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); if (bay->timer > 0) return; @@ -508,100 +485,50 @@ static void media_bay_step(int i) switch(bay->state) { case mb_powering_up: if (bay->ops->setup_bus(bay, bay->last_value) < 0) { - MBDBG("mediabay%d: device not supported (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: device not supported (kind:%d)\n", + i, bay->content_id); set_mb_power(bay, 0); break; } bay->timer = msecs_to_jiffies(MB_RESET_DELAY); bay->state = mb_enabling_bay; - MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); break; case mb_enabling_bay: bay->ops->un_reset(bay); bay->timer = msecs_to_jiffies(MB_SETUP_DELAY); bay->state = mb_resetting; - MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: releasing bay reset (kind:%d)\n", + i, bay->content_id); break; case mb_resetting: if (bay->content_id != MB_CD) { - MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: bay is up (kind:%d)\n", i, + bay->content_id); bay->state = mb_up; + device_for_each_child(&bay->mdev->ofdev.dev, + bay, mb_broadcast_hotplug); break; } -#ifdef CONFIG_BLK_DEV_IDE_PMAC - MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: releasing ATA reset (kind:%d)\n", + i, bay->content_id); bay->ops->un_reset_ide(bay); bay->timer = msecs_to_jiffies(MB_IDE_WAIT); bay->state = mb_ide_resetting; -#else - printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i); - set_mb_power(bay, 0); -#endif /* CONFIG_BLK_DEV_IDE_PMAC */ break; -#ifdef CONFIG_BLK_DEV_IDE_PMAC + case mb_ide_resetting: - bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT); - bay->state = mb_ide_waiting; - MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id); + pr_debug("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); + bay->state = mb_up; + device_for_each_child(&bay->mdev->ofdev.dev, + bay, mb_broadcast_hotplug); break; - case mb_ide_waiting: - if (bay->cd_base == NULL) { - bay->timer = 0; - bay->state = mb_up; - MBDBG("mediabay%d: up before IDE init\n", i); - break; - } else if (MB_IDE_READY(i)) { - bay->timer = 0; - bay->state = mb_up; - if (bay->cd_index < 0) { - printk("mediabay %d, registering IDE...\n", i); - pmu_suspend(); - ide_port_scan(bay->cd_port); - if (bay->cd_port->present) - bay->cd_index = bay->cd_port->index; - pmu_resume(); - } - if (bay->cd_index == -1) { - /* We eventually do a retry */ - bay->cd_retry++; - printk("IDE register error\n"); - set_mb_power(bay, 0); - } else { - printk(KERN_DEBUG "media-bay %d is ide%d\n", i, bay->cd_index); - MBDBG("mediabay %d IDE ready\n", i); - } - break; - } else if (bay->timer > 0) - bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); - if (bay->timer <= 0) { - printk("\nIDE Timeout in bay %d !, IDE state is: 0x%02x\n", - 
i, readb(bay->cd_base + 0x70)); - MBDBG("mediabay%d: nIDE Timeout !\n", i); - set_mb_power(bay, 0); - bay->timer = 0; - } - break; -#endif /* CONFIG_BLK_DEV_IDE_PMAC */ + case mb_powering_down: bay->state = mb_empty; -#ifdef CONFIG_BLK_DEV_IDE_PMAC - if (bay->cd_index >= 0) { - printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, - bay->cd_index); - ide_port_unregister_devices(bay->cd_port); - bay->cd_index = -1; - } - if (bay->cd_retry) { - if (bay->cd_retry > MAX_CD_RETRIES) { - /* Should add an error sound (sort of beep in dmasound) */ - printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i); - } else { - /* Force a new power down/up sequence */ - bay->content_id = MB_NO; - } - } -#endif /* CONFIG_BLK_DEV_IDE_PMAC */ - MBDBG("mediabay%d: end of power down\n", i); + device_for_each_child(&bay->mdev->ofdev.dev, + bay, mb_broadcast_hotplug); + pr_debug("mediabay%d: end of power down\n", i); break; } } @@ -676,11 +603,6 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de bay->last_value = bay->ops->content(bay); bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); bay->state = mb_empty; - do { - msleep(MB_POLL_DELAY); - media_bay_step(i); - } while((bay->state != mb_empty) && - (bay->state != mb_up)); /* Mark us ready by filling our mdev data */ macio_set_drvdata(mdev, bay); @@ -725,7 +647,7 @@ static int media_bay_resume(struct macio_dev *mdev) set_mb_power(bay, 0); msleep(MB_POWER_DELAY); if (bay->ops->content(bay) != bay->content_id) { - printk("mediabay%d: content changed during sleep...\n", bay->index); + printk("mediabay%d: Content changed during sleep...\n", bay->index); mutex_unlock(&bay->lock); return 0; } @@ -733,9 +655,6 @@ static int media_bay_resume(struct macio_dev *mdev) bay->last_value = bay->content_id; bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); bay->timer = msecs_to_jiffies(MB_POWER_DELAY); -#ifdef CONFIG_BLK_DEV_IDE_PMAC - bay->cd_retry = 0; -#endif do { msleep(MB_POLL_DELAY); media_bay_step(bay->index); @@ -823,9 +742,6 @@ static int __init media_bay_init(void) for (i=0; i<MAX_BAYS; i++) { memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info)); media_bays[i].content_id = -1; -#ifdef CONFIG_BLK_DEV_IDE_PMAC - media_bays[i].cd_index = -1; -#endif } if (!machine_is(powermac)) return 0; diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 95b676a19be7..5ff47ba7f2d0 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -79,6 +79,7 @@ struct thermostat { u8 limits[3]; int last_speed[2]; int last_var[2]; + int pwm_inv[2]; }; static enum {ADT7460, ADT7467} therm_type; @@ -229,19 +230,23 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) if (speed >= 0) { manual = read_reg(th, MANUAL_MODE[fan]); + manual &= ~INVERT_MASK; write_reg(th, MANUAL_MODE[fan], - (manual|MANUAL_MASK) & (~INVERT_MASK)); + manual | MANUAL_MASK | th->pwm_inv[fan]); write_reg(th, FAN_SPD_SET[fan], speed); } else { /* back to automatic */ if(therm_type == ADT7460) { manual = read_reg(th, MANUAL_MODE[fan]) & (~MANUAL_MASK); - + manual &= ~INVERT_MASK; + manual |= th->pwm_inv[fan]; write_reg(th, MANUAL_MODE[fan], manual|REM_CONTROL[fan]); } else { manual = read_reg(th, MANUAL_MODE[fan]); + manual &= ~INVERT_MASK; + manual |= th->pwm_inv[fan]; write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); } } @@ -418,6 +423,10 @@ static int probe_thermostat(struct i2c_client *client, thermostat = th; + /* record invert bit status because fw can 
corrupt it after suspend */ + th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK; + th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK; + /* be sure to really write fan speed the first time */ th->last_speed[0] = -2; th->last_speed[1] = -2; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 6f308a4757ee..db379c381432 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -36,6 +36,7 @@ #include <linux/spinlock.h> #include <linux/pm.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/device.h> @@ -186,17 +187,11 @@ static int init_pmu(void); static void pmu_start(void); static irqreturn_t via_pmu_interrupt(int irq, void *arg); static irqreturn_t gpio1_interrupt(int irq, void *arg); -static int proc_get_info(char *page, char **start, off_t off, - int count, int *eof, void *data); -static int proc_get_irqstats(char *page, char **start, off_t off, - int count, int *eof, void *data); +static const struct file_operations pmu_info_proc_fops; +static const struct file_operations pmu_irqstats_proc_fops; static void pmu_pass_intr(unsigned char *data, int len); -static int proc_get_batt(char *page, char **start, off_t off, - int count, int *eof, void *data); -static int proc_read_options(char *page, char **start, off_t off, - int count, int *eof, void *data); -static int proc_write_options(struct file *file, const char __user *buffer, - unsigned long count, void *data); +static const struct file_operations pmu_battery_proc_fops; +static const struct file_operations pmu_options_proc_fops; #ifdef CONFIG_ADB struct adb_driver via_pmu_driver = { @@ -507,19 +502,15 @@ static int __init via_pmu_dev_init(void) for (i=0; i<pmu_battery_count; i++) { char title[16]; sprintf(title, "battery_%ld", i); - proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root, - proc_get_batt, (void *)i); + proc_pmu_batt[i] = proc_create_data(title, 0, proc_pmu_root, + &pmu_battery_proc_fops, (void *)i); } - proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root, - proc_get_info, NULL); - proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root, - proc_get_irqstats, NULL); - proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root); - if (proc_pmu_options) { - proc_pmu_options->read_proc = proc_read_options; - proc_pmu_options->write_proc = proc_write_options; - } + proc_pmu_info = proc_create("info", 0, proc_pmu_root, &pmu_info_proc_fops); + proc_pmu_irqstats = proc_create("interrupts", 0, proc_pmu_root, + &pmu_irqstats_proc_fops); + proc_pmu_options = proc_create("options", 0600, proc_pmu_root, + &pmu_options_proc_fops); } return 0; } @@ -799,27 +790,33 @@ query_battery_state(void) 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); } -static int -proc_get_info(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int pmu_info_proc_show(struct seq_file *m, void *v) { - char* p = page; - - p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); - p += sprintf(p, "PMU firmware version : %02x\n", pmu_version); - p += sprintf(p, "AC Power : %d\n", + seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); + seq_printf(m, "PMU firmware version : %02x\n", pmu_version); + seq_printf(m, "AC Power : %d\n", ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); - p += sprintf(p, "Battery count : %d\n", pmu_battery_count); + seq_printf(m, "Battery count : %d\n", pmu_battery_count); + + return 0; 
+} - return p - page; +static int pmu_info_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmu_info_proc_show, NULL); } -static int -proc_get_irqstats(char *page, char **start, off_t off, - int count, int *eof, void *data) +static const struct file_operations pmu_info_proc_fops = { + .owner = THIS_MODULE, + .open = pmu_info_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int pmu_irqstats_proc_show(struct seq_file *m, void *v) { int i; - char* p = page; static const char *irq_names[] = { "Total CB1 triggered events", "Total GPIO1 triggered events", @@ -835,60 +832,76 @@ proc_get_irqstats(char *page, char **start, off_t off, }; for (i=0; i<11; i++) { - p += sprintf(p, " %2u: %10u (%s)\n", + seq_printf(m, " %2u: %10u (%s)\n", i, pmu_irq_stats[i], irq_names[i]); } - return p - page; + return 0; } -static int -proc_get_batt(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int pmu_irqstats_proc_open(struct inode *inode, struct file *file) { - long batnum = (long)data; - char *p = page; + return single_open(file, pmu_irqstats_proc_show, NULL); +} + +static const struct file_operations pmu_irqstats_proc_fops = { + .owner = THIS_MODULE, + .open = pmu_irqstats_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int pmu_battery_proc_show(struct seq_file *m, void *v) +{ + long batnum = (long)m->private; - p += sprintf(p, "\n"); - p += sprintf(p, "flags : %08x\n", - pmu_batteries[batnum].flags); - p += sprintf(p, "charge : %d\n", - pmu_batteries[batnum].charge); - p += sprintf(p, "max_charge : %d\n", - pmu_batteries[batnum].max_charge); - p += sprintf(p, "current : %d\n", - pmu_batteries[batnum].amperage); - p += sprintf(p, "voltage : %d\n", - pmu_batteries[batnum].voltage); - p += sprintf(p, "time rem. : %d\n", - pmu_batteries[batnum].time_remaining); - - return p - page; + seq_putc(m, '\n'); + seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags); + seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge); + seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge); + seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage); + seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage); + seq_printf(m, "time rem. 
: %d\n", pmu_batteries[batnum].time_remaining); + return 0; } -static int -proc_read_options(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int pmu_battery_proc_open(struct inode *inode, struct file *file) { - char *p = page; + return single_open(file, pmu_battery_proc_show, PDE(inode)->data); +} +static const struct file_operations pmu_battery_proc_fops = { + .owner = THIS_MODULE, + .open = pmu_battery_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int pmu_options_proc_show(struct seq_file *m, void *v) +{ #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) if (pmu_kind == PMU_KEYLARGO_BASED && pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) - p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup); + seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup); #endif if (pmu_kind == PMU_KEYLARGO_BASED) - p += sprintf(p, "server_mode=%d\n", option_server_mode); + seq_printf(m, "server_mode=%d\n", option_server_mode); - return p - page; + return 0; } - -static int -proc_write_options(struct file *file, const char __user *buffer, - unsigned long count, void *data) + +static int pmu_options_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmu_options_proc_show, NULL); +} + +static ssize_t pmu_options_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) { char tmp[33]; char *label, *val; - unsigned long fcount = count; + size_t fcount = count; if (!count) return -EINVAL; @@ -927,6 +940,15 @@ proc_write_options(struct file *file, const char __user *buffer, return fcount; } +static const struct file_operations pmu_options_proc_fops = { + .owner = THIS_MODULE, + .open = pmu_options_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pmu_options_proc_write, +}; + #ifdef CONFIG_ADB /* Send an ADB command */ static int pmu_send_request(struct adb_request *req, int sync) diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c index 961fa0e7c2cf..6c68b9e5f5c4 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c @@ -202,6 +202,8 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, fct->ctrl.name = "cpu-front-fan-1"; else if (!strcmp(l, "CPU A PUMP")) fct->ctrl.name = "cpu-pump-0"; + else if (!strcmp(l, "CPU B PUMP")) + fct->ctrl.name = "cpu-pump-1"; else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || !strcmp(l, "EXPANSION SLOTS INTAKE")) fct->ctrl.name = "slots-fan"; diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 0c44d560bf1a..0c7a63c1f12f 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -22,6 +22,8 @@ #include <linux/mmc/core.h> #include <linux/mmc/host.h> +MODULE_LICENSE("GPL"); + enum { CD_GPIO = 0, WP_GPIO, diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 94058c62620a..28fce65b8594 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -133,6 +133,14 @@ config SPI_LM70_LLP which interfaces to an LM70 temperature sensor using a parallel port. +config SPI_MPC52xx + tristate "Freescale MPC52xx SPI (non-PSC) controller support" + depends on PPC_MPC52xx && SPI + select SPI_MASTER_OF + help + This drivers supports the MPC52xx SPI controller in master SPI + mode. 
+ config SPI_MPC52xx_PSC tristate "Freescale MPC52xx PSC SPI controller" depends on PPC_MPC52xx && EXPERIMENTAL diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 21a118269cac..e3f092a9afa5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o obj-$(CONFIG_SPI_ORION) += orion_spi.o obj-$(CONFIG_SPI_PL022) += amba-pl022.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o +obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 1b74d5ca03f3..f50c81df336a 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/of_platform.h> +#include <linux/of_spi.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/io.h> @@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) struct mpc52xx_psc __iomem *psc = mps->psc; struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; u32 mclken_div; - int ret = 0; + int ret; /* default sysclk is 512MHz */ mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; - mpc52xx_set_psc_clkdiv(psc_id, mclken_div); + ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div); + if (ret) + return ret; /* Reset the PSC into a known state */ out_8(&psc->command, MPC52xx_PSC_RST_RX); @@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) mps->bits_per_word = 8; - return ret; + return 0; } static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) @@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, goto free_master; ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); - if (ret < 0) + if (ret < 0) { + dev_err(dev, "can't configure PSC! Is it capable of SPI?\n"); goto free_irq; + } spin_lock_init(&mps->lock); init_completion(&mps->done); @@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, const u32 *regaddr_p; u64 regaddr64, size64; s16 id = -1; + int rc; regaddr_p = of_get_address(op->node, 0, &size64, NULL); if (!regaddr_p) { - printk(KERN_ERR "Invalid PSC address\n"); + dev_err(&op->dev, "Invalid PSC address\n"); return -EINVAL; } regaddr64 = of_translate_address(op->node, regaddr_p); @@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, psc_nump = of_get_property(op->node, "cell-index", NULL); if (!psc_nump || *psc_nump > 5) { - printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " - "cell-index property\n", op->node->full_name); + dev_err(&op->dev, "Invalid cell-index property\n"); return -EINVAL; } id = *psc_nump + 1; } - return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, + rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, irq_of_parse_and_map(op->node, 0), id); + if (rc == 0) + of_register_spi_devices(dev_get_drvdata(&op->dev), op->node); + + return rc; } static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c new file mode 100644 index 000000000000..ef8379b2c172 --- /dev/null +++ b/drivers/spi/mpc52xx_spi.c @@ -0,0 +1,520 @@ +/* + * MPC52xx SPI bus driver. + * + * Copyright (C) 2008 Secret Lab Technologies Ltd. 
+ * + * This file is released under the GPLv2 + * + * This is the driver for the MPC5200's dedicated SPI controller. + * + * Note: this driver does not support the MPC5200 PSC in SPI mode. For + * that driver see drivers/spi/mpc52xx_psc_spi.c + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/spi/mpc52xx_spi.h> +#include <linux/of_spi.h> +#include <linux/io.h> +#include <asm/time.h> +#include <asm/mpc52xx.h> + +MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); +MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver"); +MODULE_LICENSE("GPL"); + +/* Register offsets */ +#define SPI_CTRL1 0x00 +#define SPI_CTRL1_SPIE (1 << 7) +#define SPI_CTRL1_SPE (1 << 6) +#define SPI_CTRL1_MSTR (1 << 4) +#define SPI_CTRL1_CPOL (1 << 3) +#define SPI_CTRL1_CPHA (1 << 2) +#define SPI_CTRL1_SSOE (1 << 1) +#define SPI_CTRL1_LSBFE (1 << 0) + +#define SPI_CTRL2 0x01 +#define SPI_BRR 0x04 + +#define SPI_STATUS 0x05 +#define SPI_STATUS_SPIF (1 << 7) +#define SPI_STATUS_WCOL (1 << 6) +#define SPI_STATUS_MODF (1 << 4) + +#define SPI_DATA 0x09 +#define SPI_PORTDATA 0x0d +#define SPI_DATADIR 0x10 + +/* FSM state return values */ +#define FSM_STOP 0 /* Nothing more for the state machine to */ + /* do. If something interesting happens */ + /* then and IRQ will be received */ +#define FSM_POLL 1 /* need to poll for completion, an IRQ is */ + /* not expected */ +#define FSM_CONTINUE 2 /* Keep iterating the state machine */ + +/* Driver internal data */ +struct mpc52xx_spi { + struct spi_master *master; + u32 sysclk; + void __iomem *regs; + int irq0; /* MODF irq */ + int irq1; /* SPIF irq */ + int ipb_freq; + + /* Statistics */ + int msg_count; + int wcol_count; + int wcol_ticks; + u32 wcol_tx_timestamp; + int modf_count; + int byte_count; + + struct list_head queue; /* queue of pending messages */ + spinlock_t lock; + struct work_struct work; + + + /* Details of current transfer (length, and buffer pointers) */ + struct spi_message *message; /* current message */ + struct spi_transfer *transfer; /* current transfer */ + int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data); + int len; + int timestamp; + u8 *rx_buf; + const u8 *tx_buf; + int cs_change; +}; + +/* + * CS control function + */ +static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value) +{ + out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08); +} + +/* + * Start a new transfer. This is called both by the idle state + * for the first transfer in a message, and by the wait state when the + * previous transfer in a message is complete. + */ +static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms) +{ + ms->rx_buf = ms->transfer->rx_buf; + ms->tx_buf = ms->transfer->tx_buf; + ms->len = ms->transfer->len; + + /* Activate the chip select */ + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 1); + ms->cs_change = ms->transfer->cs_change; + + /* Write out the first byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); +} + +/* Forward declaration of state handlers */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); +static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data); + +/* + * IDLE state + * + * No transfers are in progress; if another transfer is pending then retrieve + * it and kick it off. 
Otherwise, stop processing the state machine + */ +static int +mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + struct spi_device *spi; + int spr, sppr; + u8 ctrl1; + + if (status && (irq != NO_IRQ)) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + /* Check if there is another transfer waiting. */ + if (list_empty(&ms->queue)) + return FSM_STOP; + + /* get the head of the queue */ + ms->message = list_first_entry(&ms->queue, struct spi_message, queue); + list_del_init(&ms->message->queue); + + /* Setup the controller parameters */ + ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; + spi = ms->message->spi; + if (spi->mode & SPI_CPHA) + ctrl1 |= SPI_CTRL1_CPHA; + if (spi->mode & SPI_CPOL) + ctrl1 |= SPI_CTRL1_CPOL; + if (spi->mode & SPI_LSB_FIRST) + ctrl1 |= SPI_CTRL1_LSBFE; + out_8(ms->regs + SPI_CTRL1, ctrl1); + + /* Setup the controller speed */ + /* minimum divider is '2'. Also, add '1' to force rounding the + * divider up. */ + sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1; + spr = 0; + if (sppr < 1) + sppr = 1; + while (((sppr - 1) & ~0x7) != 0) { + sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */ + spr++; + } + sppr--; /* sppr quantity in register is offset by 1 */ + if (spr > 7) { + /* Don't overrun limits of SPI baudrate register */ + spr = 7; + sppr = 7; + } + out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */ + + ms->cs_change = 1; + ms->transfer = container_of(ms->message->transfers.next, + struct spi_transfer, transfer_list); + + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + + return FSM_CONTINUE; +} + +/* + * TRANSFER state + * + * In the middle of a transfer. If the SPI core has completed processing + * a byte, then read out the received data and write out the next byte + * (unless this transfer is finished; in which case go on to the wait + * state) + */ +static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms, + u8 status, u8 data) +{ + if (!status) + return ms->irq0 ? FSM_STOP : FSM_POLL; + + if (status & SPI_STATUS_WCOL) { + /* The SPI controller is stoopid. At slower speeds, it may + * raise the SPIF flag before the state machine is actually + * finished, which causes a collision (internal to the state + * machine only). The manual recommends inserting a delay + * between receiving the interrupt and sending the next byte, + * but it can also be worked around simply by retrying the + * transfer which is what we do here. */ + ms->wcol_count++; + ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp; + ms->wcol_tx_timestamp = get_tbl(); + data = 0; + if (ms->tx_buf) + data = *(ms->tx_buf-1); + out_8(ms->regs + SPI_DATA, data); /* try again */ + return FSM_CONTINUE; + } else if (status & SPI_STATUS_MODF) { + ms->modf_count++; + dev_err(&ms->master->dev, "mode fault\n"); + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = -EIO; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* Read data out of the spi device */ + ms->byte_count++; + if (ms->rx_buf) + *ms->rx_buf++ = data; + + /* Is the transfer complete? 
*/ + ms->len--; + if (ms->len == 0) { + ms->timestamp = get_tbl(); + ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec; + ms->state = mpc52xx_spi_fsmstate_wait; + return FSM_CONTINUE; + } + + /* Write out the next byte */ + ms->wcol_tx_timestamp = get_tbl(); + if (ms->tx_buf) + out_8(ms->regs + SPI_DATA, *ms->tx_buf++); + else + out_8(ms->regs + SPI_DATA, 0); + + return FSM_CONTINUE; +} + +/* + * WAIT state + * + * A transfer has completed; need to wait for the delay period to complete + * before starting the next transfer + */ +static int +mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) +{ + if (status && irq) + dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", + status); + + if (((int)get_tbl()) - ms->timestamp < 0) + return FSM_POLL; + + ms->message->actual_length += ms->transfer->len; + + /* Check if there is another transfer in this message. If there + * aren't then deactivate CS, notify sender, and drop back to idle + * to start the next message. */ + if (ms->transfer->transfer_list.next == &ms->message->transfers) { + ms->msg_count++; + mpc52xx_spi_chipsel(ms, 0); + ms->message->status = 0; + ms->message->complete(ms->message->context); + ms->state = mpc52xx_spi_fsmstate_idle; + return FSM_CONTINUE; + } + + /* There is another transfer; kick it off */ + + if (ms->cs_change) + mpc52xx_spi_chipsel(ms, 0); + + ms->transfer = container_of(ms->transfer->transfer_list.next, + struct spi_transfer, transfer_list); + mpc52xx_spi_start_transfer(ms); + ms->state = mpc52xx_spi_fsmstate_transfer; + return FSM_CONTINUE; +} + +/** + * mpc52xx_spi_fsm_process - Finite State Machine iteration function + * @irq: irq number that triggered the FSM or 0 for polling + * @ms: pointer to mpc52xx_spi driver data + */ +static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms) +{ + int rc = FSM_CONTINUE; + u8 status, data; + + while (rc == FSM_CONTINUE) { + /* Interrupt cleared by read of STATUS followed by + * read of DATA registers */ + status = in_8(ms->regs + SPI_STATUS); + data = in_8(ms->regs + SPI_DATA); + rc = ms->state(irq, ms, status, data); + } + + if (rc == FSM_POLL) + schedule_work(&ms->work); +} + +/** + * mpc52xx_spi_irq - IRQ handler + */ +static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms) +{ + struct mpc52xx_spi *ms = _ms; + spin_lock(&ms->lock); + mpc52xx_spi_fsm_process(irq, ms); + spin_unlock(&ms->lock); + return IRQ_HANDLED; +} + +/** + * mpc52xx_spi_wq - Workqueue function for polling the state machine + */ +static void mpc52xx_spi_wq(struct work_struct *work) +{ + struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work); + unsigned long flags; + + spin_lock_irqsave(&ms->lock, flags); + mpc52xx_spi_fsm_process(0, ms); + spin_unlock_irqrestore(&ms->lock, flags); +} + +/* + * spi_master ops + */ + +static int mpc52xx_spi_setup(struct spi_device *spi) +{ + if (spi->bits_per_word % 8) + return -EINVAL; + + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) + return -EINVAL; + + if (spi->chip_select >= spi->master->num_chipselect) + return -EINVAL; + + return 0; +} + +static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&ms->lock, flags); + list_add_tail(&m->queue, &ms->queue); + spin_unlock_irqrestore(&ms->lock, flags); + schedule_work(&ms->work); + + return 0; +} + +/* + * OF Platform Bus Binding + */ +static int 
__devinit mpc52xx_spi_probe(struct of_device *op, + const struct of_device_id *match) +{ + struct spi_master *master; + struct mpc52xx_spi *ms; + void __iomem *regs; + int rc; + + /* MMIO registers */ + dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); + regs = of_iomap(op->node, 0); + if (!regs) + return -ENODEV; + + /* initialize the device */ + out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR); + out_8(regs + SPI_CTRL2, 0x0); + out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ + out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ + + /* Clear the status register and re-read it to check for a MODF + * failure. This driver cannot currently handle multiple masters + * on the SPI bus. This fault will also occur if the SPI signals + * are not connected to any pins (port_config setting) */ + in_8(regs + SPI_STATUS); + in_8(regs + SPI_DATA); + if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { + dev_err(&op->dev, "mode fault; is port_config correct?\n"); + rc = -EIO; + goto err_init; + } + + dev_dbg(&op->dev, "allocating spi_master struct\n"); + master = spi_alloc_master(&op->dev, sizeof *ms); + if (!master) { + rc = -ENOMEM; + goto err_alloc; + } + master->bus_num = -1; + master->num_chipselect = 1; + master->setup = mpc52xx_spi_setup; + master->transfer = mpc52xx_spi_transfer; + dev_set_drvdata(&op->dev, master); + + ms = spi_master_get_devdata(master); + ms->master = master; + ms->regs = regs; + ms->irq0 = irq_of_parse_and_map(op->node, 0); + ms->irq1 = irq_of_parse_and_map(op->node, 1); + ms->state = mpc52xx_spi_fsmstate_idle; + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); + spin_lock_init(&ms->lock); + INIT_LIST_HEAD(&ms->queue); + INIT_WORK(&ms->work, mpc52xx_spi_wq); + + /* Decide if interrupts can be used */ + if (ms->irq0 && ms->irq1) { + rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, + "mpc5200-spi-modf", ms); + rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM, + "mpc5200-spi-spiF", ms); + if (rc) { + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + ms->irq0 = ms->irq1 = 0; + } + } else { + /* operate in polled mode */ + ms->irq0 = ms->irq1 = 0; + } + + if (!ms->irq0) + dev_info(&op->dev, "using polled mode\n"); + + dev_dbg(&op->dev, "registering spi_master struct\n"); + rc = spi_register_master(master); + if (rc) + goto err_register; + + of_register_spi_devices(master, op->node); + dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); + + return rc; + + err_register: + dev_err(&ms->master->dev, "initialization failed\n"); + spi_master_put(master); + err_alloc: + err_init: + iounmap(regs); + return rc; +} + +static int __devexit mpc52xx_spi_remove(struct of_device *op) +{ + struct spi_master *master = dev_get_drvdata(&op->dev); + struct mpc52xx_spi *ms = spi_master_get_devdata(master); + + free_irq(ms->irq0, ms); + free_irq(ms->irq1, ms); + + spi_unregister_master(master); + spi_master_put(master); + iounmap(ms->regs); + + return 0; +} + +static struct of_device_id mpc52xx_spi_match[] __devinitdata = { + { .compatible = "fsl,mpc5200-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); + +static struct of_platform_driver mpc52xx_spi_of_driver = { + .owner = THIS_MODULE, + .name = "mpc52xx-spi", + .match_table = mpc52xx_spi_match, + .probe = mpc52xx_spi_probe, + .remove = __exit_p(mpc52xx_spi_remove), +}; + +static int __init mpc52xx_spi_init(void) +{ + return of_register_platform_driver(&mpc52xx_spi_of_driver); +} +module_init(mpc52xx_spi_init); + +static void __exit mpc52xx_spi_exit(void) +{ + 
of_unregister_platform_driver(&mpc52xx_spi_of_driver); +} +module_exit(mpc52xx_spi_exit); + diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 46b8c5c2f45e..5a143b9f6361 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -148,7 +148,8 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, { u8 bits_per_word; - bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; + bits_per_word = (t && t->bits_per_word) + ? t->bits_per_word : spi->bits_per_word; if (bits_per_word != 8) { dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", __func__, bits_per_word); diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 4d8c54c23dd7..b043ac83c412 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *info) return 0; } +static void offb_destroy(struct fb_info *info) +{ + if (info->screen_base) + iounmap(info->screen_base); + release_mem_region(info->aperture_base, info->aperture_size); + framebuffer_release(info); +} + static struct fb_ops offb_ops = { .owner = THIS_MODULE, + .fb_destroy = offb_destroy, .fb_setcolreg = offb_setcolreg, .fb_set_par = offb_set_par, .fb_blank = offb_blank, @@ -482,10 +491,14 @@ static void __init offb_init_fb(const char *name, const char *full_name, var->sync = 0; var->vmode = FB_VMODE_NONINTERLACED; + /* set offb aperture size for generic probing */ + info->aperture_base = address; + info->aperture_size = fix->smem_len; + info->fbops = &offb_ops; info->screen_base = ioremap(address, fix->smem_len); info->pseudo_palette = (void *) (info + 1); - info->flags = FBINFO_DEFAULT | foreign_endian; + info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian; fb_alloc_cmap(&info->cmap, 256, 0); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 3711b888d482..d958b76430a2 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -861,8 +861,10 @@ config GEF_WDT Watchdog timer found in a number of GE Fanuc single board computers. config MPC5200_WDT - tristate "MPC5200 Watchdog Timer" + bool "MPC52xx Watchdog Timer" depends on PPC_MPC52xx + help + Use General Purpose Timer (GPT) 0 on the MPC5200 as Watchdog. 
config 8xxx_WDT tristate "MPC8xxx Platform Watchdog Timer" diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 699199b1baa6..89c045dc468e 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -118,7 +118,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o # POWERPC Architecture obj-$(CONFIG_GEF_WDT) += gef_wdt.o -obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o obj-$(CONFIG_PIKA_WDT) += pika_wdt.o diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c deleted file mode 100644 index fa9c47ce0ae7..000000000000 --- a/drivers/watchdog/mpc5200_wdt.c +++ /dev/null @@ -1,293 +0,0 @@ -#include <linux/init.h> -#include <linux/module.h> -#include <linux/miscdevice.h> -#include <linux/watchdog.h> -#include <linux/io.h> -#include <linux/spinlock.h> -#include <linux/of_platform.h> -#include <linux/uaccess.h> -#include <asm/mpc52xx.h> - - -#define GPT_MODE_WDT (1 << 15) -#define GPT_MODE_CE (1 << 12) -#define GPT_MODE_MS_TIMER (0x4) - - -struct mpc5200_wdt { - unsigned count; /* timer ticks before watchdog kicks in */ - long ipb_freq; - struct miscdevice miscdev; - struct resource mem; - struct mpc52xx_gpt __iomem *regs; - spinlock_t io_lock; -}; - -/* is_active stores wether or not the /dev/watchdog device is opened */ -static unsigned long is_active; - -/* misc devices don't provide a way, to get back to 'dev' or 'miscdev' from - * file operations, which sucks. But there can be max 1 watchdog anyway, so... - */ -static struct mpc5200_wdt *wdt_global; - - -/* helper to calculate timeout in timer counts */ -static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout) -{ - /* use biggest prescaler of 64k */ - wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout; - - if (wdt->count > 0xffff) - wdt->count = 0xffff; -} -/* return timeout in seconds (calculated from timer count) */ -static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt) -{ - return wdt->count * 0x10000 / wdt->ipb_freq; -} - - -/* watchdog operations */ -static int mpc5200_wdt_start(struct mpc5200_wdt *wdt) -{ - spin_lock(&wdt->io_lock); - /* disable */ - out_be32(&wdt->regs->mode, 0); - /* set timeout, with maximum prescaler */ - out_be32(&wdt->regs->count, 0x0 | wdt->count); - /* enable watchdog */ - out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT | - GPT_MODE_MS_TIMER); - spin_unlock(&wdt->io_lock); - - return 0; -} -static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt) -{ - spin_lock(&wdt->io_lock); - /* writing A5 to OCPW resets the watchdog */ - out_be32(&wdt->regs->mode, 0xA5000000 | - (0xffffff & in_be32(&wdt->regs->mode))); - spin_unlock(&wdt->io_lock); - return 0; -} -static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt) -{ - spin_lock(&wdt->io_lock); - /* disable */ - out_be32(&wdt->regs->mode, 0); - spin_unlock(&wdt->io_lock); - return 0; -} - - -/* file operations */ -static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) -{ - struct mpc5200_wdt *wdt = file->private_data; - mpc5200_wdt_ping(wdt); - return 0; -} -static struct watchdog_info mpc5200_wdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, - .identity = "mpc5200 watchdog on GPT0", -}; -static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct mpc5200_wdt *wdt = file->private_data; - int __user *data = (int __user *)arg; - int timeout; - int ret = 0; - - switch (cmd) { - case WDIOC_GETSUPPORT: - ret = 
copy_to_user(data, &mpc5200_wdt_info, - sizeof(mpc5200_wdt_info)); - if (ret) - ret = -EFAULT; - break; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - ret = put_user(0, data); - break; - - case WDIOC_KEEPALIVE: - mpc5200_wdt_ping(wdt); - break; - - case WDIOC_SETTIMEOUT: - ret = get_user(timeout, data); - if (ret) - break; - mpc5200_wdt_set_timeout(wdt, timeout); - mpc5200_wdt_start(wdt); - /* fall through and return the timeout */ - - case WDIOC_GETTIMEOUT: - timeout = mpc5200_wdt_get_timeout(wdt); - ret = put_user(timeout, data); - break; - - default: - ret = -ENOTTY; - } - return ret; -} - -static int mpc5200_wdt_open(struct inode *inode, struct file *file) -{ - /* /dev/watchdog can only be opened once */ - if (test_and_set_bit(0, &is_active)) - return -EBUSY; - - /* Set and activate the watchdog */ - mpc5200_wdt_set_timeout(wdt_global, 30); - mpc5200_wdt_start(wdt_global); - file->private_data = wdt_global; - return nonseekable_open(inode, file); -} -static int mpc5200_wdt_release(struct inode *inode, struct file *file) -{ -#if WATCHDOG_NOWAYOUT == 0 - struct mpc5200_wdt *wdt = file->private_data; - mpc5200_wdt_stop(wdt); - wdt->count = 0; /* == disabled */ -#endif - clear_bit(0, &is_active); - return 0; -} - -static const struct file_operations mpc5200_wdt_fops = { - .owner = THIS_MODULE, - .write = mpc5200_wdt_write, - .unlocked_ioctl = mpc5200_wdt_ioctl, - .open = mpc5200_wdt_open, - .release = mpc5200_wdt_release, -}; - -/* module operations */ -static int mpc5200_wdt_probe(struct of_device *op, - const struct of_device_id *match) -{ - struct mpc5200_wdt *wdt; - int err; - const void *has_wdt; - int size; - - has_wdt = of_get_property(op->node, "has-wdt", NULL); - if (!has_wdt) - has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL); - if (!has_wdt) - return -ENODEV; - - wdt = kzalloc(sizeof(*wdt), GFP_KERNEL); - if (!wdt) - return -ENOMEM; - - wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node); - - err = of_address_to_resource(op->node, 0, &wdt->mem); - if (err) - goto out_free; - size = wdt->mem.end - wdt->mem.start + 1; - if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) { - err = -ENODEV; - goto out_free; - } - wdt->regs = ioremap(wdt->mem.start, size); - if (!wdt->regs) { - err = -ENODEV; - goto out_release; - } - - dev_set_drvdata(&op->dev, wdt); - spin_lock_init(&wdt->io_lock); - - wdt->miscdev = (struct miscdevice) { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &mpc5200_wdt_fops, - .parent = &op->dev, - }; - wdt_global = wdt; - err = misc_register(&wdt->miscdev); - if (!err) - return 0; - - iounmap(wdt->regs); -out_release: - release_mem_region(wdt->mem.start, size); -out_free: - kfree(wdt); - return err; -} - -static int mpc5200_wdt_remove(struct of_device *op) -{ - struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); - - mpc5200_wdt_stop(wdt); - misc_deregister(&wdt->miscdev); - iounmap(wdt->regs); - release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1); - kfree(wdt); - - return 0; -} -static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state) -{ - struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); - mpc5200_wdt_stop(wdt); - return 0; -} -static int mpc5200_wdt_resume(struct of_device *op) -{ - struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); - if (wdt->count) - mpc5200_wdt_start(wdt); - return 0; -} -static int mpc5200_wdt_shutdown(struct of_device *op) -{ - struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev); - mpc5200_wdt_stop(wdt); - return 0; -} - -static struct of_device_id 
mpc5200_wdt_match[] = { - { .compatible = "mpc5200-gpt", }, - { .compatible = "fsl,mpc5200-gpt", }, - {}, -}; -static struct of_platform_driver mpc5200_wdt_driver = { - .owner = THIS_MODULE, - .name = "mpc5200-gpt-wdt", - .match_table = mpc5200_wdt_match, - .probe = mpc5200_wdt_probe, - .remove = mpc5200_wdt_remove, - .suspend = mpc5200_wdt_suspend, - .resume = mpc5200_wdt_resume, - .shutdown = mpc5200_wdt_shutdown, -}; - - -static int __init mpc5200_wdt_init(void) -{ - return of_register_platform_driver(&mpc5200_wdt_driver); -} - -static void __exit mpc5200_wdt_exit(void) -{ - of_unregister_platform_driver(&mpc5200_wdt_driver); -} - -module_init(mpc5200_wdt_init); -module_exit(mpc5200_wdt_exit); - -MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 47536197ffdd..e287863ac053 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); +extern ssize_t arch_cpu_probe(const char *, size_t); +extern ssize_t arch_cpu_release(const char *, size_t); #endif struct notifier_block; @@ -115,6 +117,19 @@ extern void put_online_cpus(void); #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +extern void cpu_hotplug_driver_lock(void); +extern void cpu_hotplug_driver_unlock(void); +#else +static inline void cpu_hotplug_driver_lock(void) +{ +} + +static inline void cpu_hotplug_driver_unlock(void) +{ +} +#endif + #else /* CONFIG_HOTPLUG_CPU */ #define get_online_cpus() do { } while (0) diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f8f8900fc5ec..caf6173bd2e8 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -436,6 +436,9 @@ struct kvm_ioeventfd { #endif #define KVM_CAP_IOEVENTFD 36 #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 +/* KVM upstream has more features, but we synched this number. + Linux, please remove this comment on rebase. */ +#define KVM_CAP_PPC_SEGSTATE 43 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h new file mode 100644 index 000000000000..d1004cf09241 --- /dev/null +++ b/include/linux/spi/mpc52xx_spi.h @@ -0,0 +1,10 @@ + +#ifndef INCLUDE_MPC5200_SPI_H +#define INCLUDE_MPC5200_SPI_H + +extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master, + void (*hook)(struct spi_message *m, + void *context), + void *hook_context); + +#endif |
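
In the new drivers/spi/mpc52xx_spi.c above, mpc52xx_spi_fsmstate_wait() arms a deadline when a transfer finishes (get_tbl() plus delay_usecs * tb_ticks_per_usec) and then checks it with a signed subtraction, so the comparison stays correct when the 32-bit timebase wraps. A minimal sketch of that pattern, with made-up helper names that are not part of the driver:

/*
 * Sketch of the wrap-safe timebase deadline used by the wait state.
 * The helper names are illustrative only.
 */
#include <asm/time.h>		/* get_tbl(), tb_ticks_per_usec */

static unsigned int spi_delay_arm(unsigned int delay_usecs)
{
	/* Deadline in timebase ticks; wrapping is harmless. */
	return get_tbl() + delay_usecs * tb_ticks_per_usec;
}

static int spi_delay_expired(unsigned int deadline)
{
	/* Signed difference keeps the test valid across a 32-bit wrap. */
	return (int)(get_tbl() - deadline) >= 0;
}

While the deadline has not passed, the handler returns FSM_POLL, so the state machine is re-run from the workqueue instead of busy-waiting in interrupt context.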
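
mpc52xx_spi_transfer() only appends the spi_message to the driver queue under the lock and schedules the workqueue; completion is reported later through m->complete() from the wait state. For reference, a hedged sketch of how a protocol driver bound to this master could submit a two-transfer message, exercising the delay_usecs and cs_change fields the FSM honours (device, buffers and lengths are made up):

#include <linux/spi/spi.h>

/* Illustrative only: 'spi' is an spi_device attached to this master. */
static int example_cmd_response(struct spi_device *spi, const u8 *cmd, u8 *resp)
{
	struct spi_transfer t[2] = {
		{
			.tx_buf      = cmd,
			.len         = 4,
			.delay_usecs = 10,	/* honoured in the wait state */
			.cs_change   = 1,	/* drop CS between transfers */
		}, {
			.rx_buf = resp,
			.len    = 8,
		},
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	/* spi_sync() sleeps until the driver calls m.complete(). */
	return spi_sync(spi, &m);
}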
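
In mpc52xx_spi_probe() the two request_irq() return values are combined with "rc |= ...", which works as a "did either fail" test but leaves rc holding an OR of two error codes, and on failure free_irq() is also called for an interrupt that may never have been requested. A purely illustrative restructuring of the same polled-mode fallback, shown as a fragment in the probe() context above and not part of the patch:

/* Illustrative variant of the IRQ setup with fallback to polling. */
if (ms->irq0 && ms->irq1) {
	rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
			 "mpc5200-spi-modf", ms);
	if (rc == 0) {
		rc = request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
				 "mpc5200-spi-spiF", ms);
		if (rc)
			free_irq(ms->irq0, ms);	/* free only what was requested */
	}
	if (rc)
		ms->irq0 = ms->irq1 = 0;	/* fall back to polled mode */
} else {
	ms->irq0 = ms->irq1 = 0;
}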
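
The xilinx_spi change restores the usual SPI core convention: t->bits_per_word may override the word size for a single transfer, and a value of 0 means "use the spi_device default". Expressed as the small helper many controller drivers open-code (the helper name is illustrative):

#include <linux/spi/spi.h>

/* A bits_per_word of 0 in the transfer means "no per-transfer override". */
static u8 transfer_bits_per_word(const struct spi_device *spi,
				 const struct spi_transfer *t)
{
	return (t && t->bits_per_word) ? t->bits_per_word : spi->bits_per_word;
}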
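
The include/linux/cpu.h hunk adds arch_cpu_probe()/arch_cpu_release() plus a cpu_hotplug_driver_lock()/cpu_hotplug_driver_unlock() pair that collapses to empty inlines when CONFIG_ARCH_CPU_PROBE_RELEASE is not set, so callers need no #ifdefs. A hedged sketch of a caller serialising a CPU offline against an architecture release handler; the wrapper function itself is hypothetical:

#include <linux/cpu.h>

/*
 * Hypothetical helper: take a CPU offline while holding the hotplug
 * driver lock so it cannot race an arch_cpu_release() in progress.
 */
static int example_offline_cpu(unsigned int cpu)
{
	int ret;

	cpu_hotplug_driver_lock();	/* no-op without ARCH_CPU_PROBE_RELEASE */
	ret = cpu_down(cpu);
	cpu_hotplug_driver_unlock();

	return ret;
}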
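
The new include/linux/spi/mpc52xx_spi.h only declares mpc52xx_spi_set_premessage_hook(); from the prototype, the hook receives the spi_message about to be started plus an opaque context, but where exactly the driver invokes it is not visible in these hunks, so the timing below is an assumption. A sketch of a board/platform file registering such a hook, e.g. to enable an external buffer before each message (the GPIO number and function names are made up):

#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mpc52xx_spi.h>

#define BOARD_SPI_BUF_EN_GPIO	42	/* made-up board GPIO */

/* Assumed to run in the driver just before a queued message starts. */
static void board_spi_premessage(struct spi_message *m, void *context)
{
	gpio_set_value(BOARD_SPI_BUF_EN_GPIO, 1);
}

static void board_spi_attach_hook(struct spi_master *master)
{
	mpc52xx_spi_set_premessage_hook(master, board_spi_premessage, NULL);
}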