From 7e302869e0c5261aba779e059cddcd2fbf7aedbe Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 6 Aug 2008 09:10:01 +1000 Subject: powerpc: Split-out common MSI bitmap logic into msi_bitmap.c There are now two almost identical implementations of an MSI bitmap allocator, one in mpic_msi.c and the other in fsl_msi.c. Merge them together and put the result in msi_bitmap.c. Some of the MPIC bits will remain to provide a nicer interface for the MPIC users. In the process we fix two buglets. The first is that the allocation routines, now msi_bitmap_alloc_hwirqs(), returned an unsigned result, even though they use -1 to indicate allocation failure. Although all the callers were checking correctly, it is much better for the routine to just return an int. At least until someone wants > ~2 billion MSIs. The second buglet is that the device tree reservation logic only allowed power-of-two reservations. AFAICT that didn't affect any existing code but it's nicer if we can reserve arbitrary irqs from MSI use. We also add some selftests, which exposed the two buglets and now test for them, as well as some basic sanity tests. The tests are only built when CONFIG_DEBUG_KERNEL=y. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/msi_bitmap.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 arch/powerpc/include/asm/msi_bitmap.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h new file mode 100644 index 000000000000..97ac3f46ae0d --- /dev/null +++ b/arch/powerpc/include/asm/msi_bitmap.h @@ -0,0 +1,35 @@ +#ifndef _POWERPC_SYSDEV_MSI_BITMAP_H +#define _POWERPC_SYSDEV_MSI_BITMAP_H + +/* + * Copyright 2008, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#include +#include + +struct msi_bitmap { + struct device_node *of_node; + unsigned long *bitmap; + spinlock_t lock; + unsigned int irq_count; +}; + +int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num); +void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, + unsigned int num); +void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq); + +int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp); + +int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, + struct device_node *of_node); +void msi_bitmap_free(struct msi_bitmap *bmp); + +#endif /* _POWERPC_SYSDEV_MSI_BITMAP_H */ -- cgit v1.2.1 From 25235f712b680d00756a73ee64289137989fc6fd Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 6 Aug 2008 09:10:03 +1000 Subject: powerpc: Convert the MPIC MSI code to use msi_bitmap This affects the U3 MSI code as well as the PASEMI MSI code. We keep some of the MPIC routines as helpers, and also the U3 best-guess reservation logic. The rest is replaced by the generic code. There are also a few printk format changes due to the hwirq type change.
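As a hedged sketch of the generic API that converted callers now drive (the prototypes are the ones in msi_bitmap.h above; the device node, bitmap size, and helper name here are hypothetical):

static struct msi_bitmap example_bmp;	/* hypothetical consumer state */

static int example_msi_init(struct device_node *np)
{
	int hwirq;

	/* set up a bitmap covering 128 hardware irqs (size is made up) */
	if (msi_bitmap_alloc(&example_bmp, 128, np))
		return -ENOMEM;

	/* honour any reservations listed in the device tree */
	msi_bitmap_reserve_dt_hwirqs(&example_bmp);

	/* grab 4 contiguous hwirqs; the return is now a signed int,
	 * so failure is simply a negative value */
	hwirq = msi_bitmap_alloc_hwirqs(&example_bmp, 4);
	if (hwirq < 0)
		return -ENOSPC;

	/* ... use hwirq .. hwirq + 3, then release everything ... */
	msi_bitmap_free_hwirqs(&example_bmp, hwirq, 4);
	msi_bitmap_free(&example_bmp);
	return 0;
}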
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/mpic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index fe566a348a86..34d9ac433ace 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -5,6 +5,7 @@ #include #include #include +#include /* * Global registers */ @@ -301,8 +302,7 @@ struct mpic #endif #ifdef CONFIG_PCI_MSI - spinlock_t bitmap_lock; - unsigned long *hwirq_bitmap; + struct msi_bitmap msi_bitmap; #endif #ifdef CONFIG_MPIC_BROKEN_REGREAD -- cgit v1.2.1 From 0ec27c049d80535f77901654a310b090106b046c Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 7 Aug 2008 10:30:40 +1000 Subject: powerpc: Remove include of linux/of_platform.h from asm/of_platform.h Now that we have removed all inclusions of asm/of_platform.h, this compatibility include can be removed. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/of_platform.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/of_platform.h b/arch/powerpc/include/asm/of_platform.h index 18659ef72139..53b46507ffde 100644 --- a/arch/powerpc/include/asm/of_platform.h +++ b/arch/powerpc/include/asm/of_platform.h @@ -11,9 +11,6 @@ * */ -/* This is just here during the transition */ -#include - /* Platform drivers register/unregister */ static inline int of_register_platform_driver(struct of_platform_driver *drv) { -- cgit v1.2.1 From 41eba0ad0033967eda346dd833194e96fdf5f405 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 18 Aug 2008 14:23:48 +1000 Subject: powerpc: Turn get/set_hard_smp_processor_id into inlines They don't need to be macros, and having them as inline functions avoids warnings about unused variables on some configurations when the argument isn't evaluated.
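A purely illustrative example of the warning class this fixes (the query helper is hypothetical):

/* With the old empty macro form, e.g. on 32-bit without CONFIG_SMP:
 *     #define set_hard_smp_processor_id(cpu, phys)
 * 'phys' below expands away entirely and gcc warns about an unused
 * variable; the (empty) inline function still evaluates its arguments. */
static void example_record_cpu(int cpu)
{
	int phys = example_query_phys_id(cpu);	/* hypothetical helper */

	set_hard_smp_processor_id(cpu, phys);
}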
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/smp.h | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 4d28e1e4521b..c092f84302fd 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -56,9 +56,16 @@ extern int smp_hw_index[]; #define raw_smp_processor_id() (current_thread_info()->cpu) #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) -#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)]) -#define set_hard_smp_processor_id(cpu, phys)\ - (smp_hw_index[(cpu)] = (phys)) + +static inline int get_hard_smp_processor_id(int cpu) +{ + return smp_hw_index[cpu]; +} + +static inline void set_hard_smp_processor_id(int cpu, int phys) +{ + smp_hw_index[cpu] = phys; +} #endif DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); @@ -92,9 +99,15 @@ extern void __cpu_die(unsigned int cpu); #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC64 -#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id) -#define set_hard_smp_processor_id(CPU, VAL) \ - do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0) +static inline int get_hard_smp_processor_id(int cpu) +{ + return paca[cpu].hw_cpu_id; +} + +static inline void set_hard_smp_processor_id(int cpu, int phys) +{ + paca[cpu].hw_cpu_id = phys; +} extern void smp_release_cpus(void); @@ -102,10 +115,16 @@ extern void smp_release_cpus(void); /* 32-bit */ #ifndef CONFIG_SMP extern int boot_cpuid_phys; -#define get_hard_smp_processor_id(cpu) boot_cpuid_phys -#define set_hard_smp_processor_id(cpu, phys) -#endif -#endif +static inline int get_hard_smp_processor_id(int cpu) +{ + return boot_cpuid_phys; +} + +static inline void set_hard_smp_processor_id(int cpu, int phys) +{ +} +#endif /* !CONFIG_SMP */ +#endif /* !CONFIG_PPC64 */ extern int smt_enabled_at_boot; -- cgit v1.2.1 From b950bdd0fc247d0ab4aea88d46e8cced3eac949e Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 18 Aug 2008 14:23:51 +1000 Subject: powerpc: Expose PMCs & cache topology in sysfs on 32-bit The file arch/powerpc/kernel/sysfs.c is currently only compiled for 64-bit kernels. It contains code to register CPU sysdevs in sysfs and add various properties such as cache topology and raw access by root to performance monitor counters (PMCs). A lot of that can be re-used as is on 32-bits. This makes the file be built for both, with appropriate ifdef'ing for the few bits that are really 64-bit specific, and adds some support for the raw PMCs for 75x and 74xx processors. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/cputable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index ef8a248dfd55..f99813f1ede6 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -62,6 +62,7 @@ enum powerpc_pmc_type { PPC_PMC_DEFAULT = 0, PPC_PMC_IBM = 1, PPC_PMC_PA6T = 2, + PPC_PMC_G4 = 3, }; struct pt_regs; -- cgit v1.2.1 From 7713fef06517d216f96ee7c8ad750e72bc08d38f Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 19 Aug 2008 18:19:25 +1000 Subject: powerpc: Remove include of linux/of_device.h from asm/of_device.h Now that we have removed all inclusions of asm/of_device.h, this compatibility include can be removed.
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/of_device.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/of_device.h b/arch/powerpc/include/asm/of_device.h index 3c123990ca2e..a64debf177dc 100644 --- a/arch/powerpc/include/asm/of_device.h +++ b/arch/powerpc/include/asm/of_device.h @@ -24,8 +24,5 @@ extern struct of_device *of_device_alloc(struct device_node *np, extern int of_device_uevent(struct device *dev, struct kobj_uevent_env *env); -/* This is just here during the transition */ -#include - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_OF_DEVICE_H */ -- cgit v1.2.1 From 15e2fc9bbf946f999c78fe24cb1bc03c7256acab Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 1 Sep 2008 14:11:47 +0100 Subject: PowerPC: Use Signed-off-by: David Woodhouse --- arch/powerpc/include/asm/statfs.h | 54 --------------------------------------- 1 file changed, 54 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/statfs.h b/arch/powerpc/include/asm/statfs.h index 67024026c10d..5244834583a4 100644 --- a/arch/powerpc/include/asm/statfs.h +++ b/arch/powerpc/include/asm/statfs.h @@ -1,60 +1,6 @@ #ifndef _ASM_POWERPC_STATFS_H #define _ASM_POWERPC_STATFS_H -/* For ppc32 we just use the generic definitions, not so simple on ppc64 */ - -#ifndef __powerpc64__ #include -#else - -#ifndef __KERNEL_STRICT_NAMES -#include -typedef __kernel_fsid_t fsid_t; -#endif - -/* - * We're already 64-bit, so duplicate the definition - */ -struct statfs { - long f_type; - long f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; - __kernel_fsid_t f_fsid; - long f_namelen; - long f_frsize; - long f_spare[5]; -}; - -struct statfs64 { - long f_type; - long f_bsize; - long f_blocks; - long f_bfree; - long f_bavail; - long f_files; - long f_ffree; - __kernel_fsid_t f_fsid; - long f_namelen; - long f_frsize; - long f_spare[5]; -}; -struct compat_statfs64 { - __u32 f_type; - __u32 f_bsize; - __u64 f_blocks; - __u64 f_bfree; - __u64 f_bavail; - __u64 f_files; - __u64 f_ffree; - __kernel_fsid_t f_fsid; - __u32 f_namelen; - __u32 f_frsize; - __u32 f_spare[5]; -}; -#endif /* ! __powerpc64__ */ #endif -- cgit v1.2.1 From 9d5a9e74655b9d04d0ec9c8e47801163b7b74211 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sat, 28 Jun 2008 00:12:52 +0300 Subject: Remove asm/a.out.h files for all architectures without a.out support. This patch also includes the required removal of (unused) inclusion of 's in the arch/ code for these architectures. 
[dwmw2: updated for 2.6.27-rc] Signed-off-by: Adrian Bunk Signed-off-by: David Woodhouse --- arch/powerpc/include/asm/a.out.h | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 arch/powerpc/include/asm/a.out.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/a.out.h b/arch/powerpc/include/asm/a.out.h deleted file mode 100644 index 89cead6b176e..000000000000 --- a/arch/powerpc/include/asm/a.out.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_POWERPC_A_OUT_H -#define _ASM_POWERPC_A_OUT_H - -struct exec -{ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ - unsigned a_text; /* length of text, in bytes */ - unsigned a_data; /* length of data, in bytes */ - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned a_syms; /* length of symbol table data in file, in bytes */ - unsigned a_entry; /* start address */ - unsigned a_trsize; /* length of relocation info for text, in bytes */ - unsigned a_drsize; /* length of relocation info for data, in bytes */ -}; - -#define N_TRSIZE(a) ((a).a_trsize) -#define N_DRSIZE(a) ((a).a_drsize) -#define N_SYMSIZE(a) ((a).a_syms) - -#endif /* _ASM_POWERPC_A_OUT_H */ -- cgit v1.2.1 From 600715dcdf567c86f8b2c6173fcfb4b873e25a19 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 11 Sep 2008 01:31:45 -0700 Subject: generic: add phys_addr_t for holding physical addresses Add a kernel-wide "phys_addr_t" which is guaranteed to be able to hold any physical address. By default it equals the word size of the architecture, but a 32-bit architecture can set ARCH_PHYS_ADDR_T_64BIT if it needs a 64-bit phys_addr_t. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/types.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index d3374bc865ba..c646f34c4e8b 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -48,13 +48,6 @@ typedef struct { typedef __vector128 vector128; -/* Physical address used by some IO functions */ -#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT) -typedef u64 phys_addr_t; -#else -typedef u32 phys_addr_t; -#endif - #ifdef __powerpc64__ typedef u64 dma_addr_t; #else -- cgit v1.2.1 From 2a9294369bd020db89bfdf78b84c3615b39a5c84 Mon Sep 17 00:00:00 2001 From: Mark Nelson Date: Fri, 22 Aug 2008 14:36:19 +1000 Subject: powerpc: Add new CPU feature: CPU_FTR_CP_USE_DCBTZ Add a new CPU feature bit, CPU_FTR_CP_USE_DCBTZ, to be added to the 64bit powerpc chips that benefit from having dcbt and dcbz instructions used in their memory copy routines. This will be used in a subsequent patch that updates copy_4K_page(). The new bit is added to Cell, PPC970 and Power4 because they show better performance with the new copy_4K_page() when dcbt and dcbz instructions are used. 
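A hedged sketch of how such a feature bit can be tested at runtime (cpu_has_feature() is the usual C-level accessor; the dcbt/dcbz copy variant named here is hypothetical, and the follow-on copy_4K_page() patch may well use assembly feature fixups instead):

#include <asm/cputable.h>

static void copy_page_example(void *to, void *from)
{
	if (cpu_has_feature(CPU_FTR_CP_USE_DCBTZ))
		copy_4K_page_dcbtz(to, from);	/* hypothetical variant */
	else
		copy_4K_page(to, from);
}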
Signed-off-by: Mark Nelson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/cputable.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index f99813f1ede6..1e94b07a020e 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -193,6 +193,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) +#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) #ifndef __ASSEMBLY__ @@ -388,10 +389,11 @@ extern const char *powerpc_base_platform; CPU_FTR_MMCRA | CPU_FTR_CTRL) #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ - CPU_FTR_MMCRA) + CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ - CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) + CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ + CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -412,7 +414,8 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) + CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ + CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ -- cgit v1.2.1 From 1f6a93e4c35e75d547b51f56ba8139ab1a91628c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 30 Aug 2008 11:40:24 +1000 Subject: powerpc: Make it possible to move the interrupt handlers away from the kernel This changes the way that the exception prologs transfer control to the handlers in 64-bit kernels with the aim of making it possible to have the prologs separate from the main body of the kernel. Now, instead of computing the address of the handler by taking the top 32 bits of the paca address (to get the 0xc0000000........ part) and ORing in something in the bottom 16 bits, we get the base address of the kernel by doing a load from the paca and adding an offset. This also replaces an mfmsr and an ori to compute the MSR value for the handler with a load from the paca. That makes it unnecessary to have a separate version of EXCEPTION_PROLOG_PSERIES that forces 64-bit mode. We can no longer use direct branches in the exception prolog code, which means that the SLB miss handlers can't branch directly to .slb_miss_realmode any more. Instead we have to compute the address and do an indirect branch. This is conditional on CONFIG_RELOCATABLE; for non-relocatable kernels we use a direct branch as before. (A later change will allow CONFIG_RELOCATABLE to be set on 64-bit powerpc.) Since the secondary CPUs on pSeries start execution in the first 0x100 bytes of real memory and then have to get to wherever the kernel is, we can't use a direct branch to get there. 
Instead this changes __secondary_hold_spinloop from a flag to a function pointer. When it is set to a non-NULL value, the secondary CPUs jump to the function pointed to by that value. Finally this eliminates one code difference between 32-bit and 64-bit by making __secondary_hold be the text address of the secondary CPU spinloop rather than a function descriptor for it. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/exception.h | 42 +++++------------------------------- arch/powerpc/include/asm/paca.h | 2 ++ 2 files changed, 7 insertions(+), 37 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception.h index 329148b5acc6..d3d4534e3c74 100644 --- a/arch/powerpc/include/asm/exception.h +++ b/arch/powerpc/include/asm/exception.h @@ -53,14 +53,8 @@ * low halfword of the address, but for Kdump we need the whole low * word. */ -#ifdef CONFIG_CRASH_DUMP #define LOAD_HANDLER(reg, label) \ - oris reg,reg,(label)@h; /* virt addr of handler ... */ \ - ori reg,reg,(label)@l; /* .. and the rest */ -#else -#define LOAD_HANDLER(reg, label) \ - ori reg,reg,(label)@l; /* virt addr of handler ... */ -#endif + addi reg,reg,(label)-_stext; /* virt addr of handler ... */ #define EXCEPTION_PROLOG_1(area) \ mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ @@ -72,37 +66,12 @@ std r9,area+EX_R13(r13); \ mfcr r9 -/* - * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode. - * The firmware calls the registered system_reset_fwnmi and - * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run - * a 32bit application at the time of the event. - * This firmware bug is present on POWER4 and JS20. - */ -#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \ - EXCEPTION_PROLOG_1(area); \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ - /* force 64bit mode */ \ - li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \ - rldimi r10,r11,61,0; /* insert into top 3 bits */ \ - /* done 64bit mode */ \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ - LOAD_HANDLER(r12,label) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ - b . 
/* prevent speculative execution */ - #define EXCEPTION_PROLOG_PSERIES(area, label) \ EXCEPTION_PROLOG_1(area); \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ + ld r12,PACAKBASE(r13); /* get high part of &label */ \ + ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ mfspr r11,SPRN_SRR0; /* save SRR0 */ \ LOAD_HANDLER(r12,label) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ mtspr SPRN_SRR0,r12; \ mfspr r12,SPRN_SRR1; /* and SRR1 */ \ mtspr SPRN_SRR1,r10; \ @@ -210,11 +179,10 @@ label##_pSeries: \ std r10,PACA_EXGEN+EX_R13(r13); \ std r11,PACA_EXGEN+EX_R11(r13); \ std r12,PACA_EXGEN+EX_R12(r13); \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ + ld r12,PACAKBASE(r13); /* get high part of &label */ \ + ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ mfspr r11,SPRN_SRR0; /* save SRR0 */ \ LOAD_HANDLER(r12,label##_common) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ mtspr SPRN_SRR0,r12; \ mfspr r12,SPRN_SRR1; /* and SRR1 */ \ mtspr SPRN_SRR1,r10; \ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 6493a395508b..082b3aedf145 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -62,6 +62,8 @@ struct paca_struct { u16 paca_index; /* Logical processor number */ u64 kernel_toc; /* Kernel TOC address */ + u64 kernelbase; /* Base address of kernel */ + u64 kernel_msr; /* MSR while running in kernel */ u64 stab_real; /* Absolute address of segment table */ u64 stab_addr; /* Virtual address of segment table */ void *emergency_sp; /* pointer to emergency stack */ -- cgit v1.2.1 From e31aa453bbc4886a7bd33e5c2afa526d6f55bd7a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 30 Aug 2008 11:41:12 +1000 Subject: powerpc: Use LOAD_REG_IMMEDIATE only for constants on 64-bit Using LOAD_REG_IMMEDIATE to get the address of kernel symbols generates 5 instructions where LOAD_REG_ADDR can do it in one, and will generate R_PPC64_ADDR16_* relocations in the output when we get to making the kernel as a position-independent executable, which we'd rather not have to handle. This changes various bits of assembly code to use LOAD_REG_ADDR when we need to get the address of a symbol, or to use suitable position-independent code for cases where we can't access the TOC for various reasons, or if we're not running at the address we were linked at. It also cleans up a few minor things; there's no reason to save and restore SRR0/1 around RTAS calls, __mmu_off can get the return address from LR more conveniently than the caller can supply it in R4 (and we already assume elsewhere that EA == RA if the MMU is on in early boot), and enable_64b_mode was using 5 instructions where 2 would do. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/ppc_asm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 0966899d974b..c4a029ccb4d3 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -268,7 +268,7 @@ n: * Loads the value of the constant expression 'expr' into register 'rn' * using immediate instructions only. Use this when it's important not * to reference other data (i.e. on ppc64 when the TOC pointer is not - * valid). + * valid) and when 'expr' is a constant or absolute address. * * LOAD_REG_ADDR(rn, name) * Loads the address of label 'name' into register 'rn'. 
Use this when -- cgit v1.2.1 From 549e8152de8039506f69c677a4546e5427aa6ae7 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 30 Aug 2008 11:43:47 +1000 Subject: powerpc: Make the 64-bit kernel a position-independent executable This implements CONFIG_RELOCATABLE for 64-bit by making the kernel a position-independent executable (PIE) when it is set. This involves processing the dynamic relocations in the image in the early stages of booting, even if the kernel is being run at the address it is linked at, since the linker does not necessarily fill in words in the image for which there are dynamic relocations. (In fact the linker does fill in such words for 64-bit executables, though not for 32-bit executables, so in principle we could avoid calling relocate() entirely when we're running a 64-bit kernel at the linked address.) The dynamic relocations are processed by a new function relocate(addr), where the addr parameter is the virtual address where the image will be run. In fact we call it twice; once before calling prom_init, and again when starting the main kernel. This means that reloc_offset() returns 0 in prom_init (since it has been relocated to the address it is running at), which necessitated a few adjustments. This also changes __va and __pa to use an equivalent definition that is simpler. With the relocatable kernel, PAGE_OFFSET and MEMORY_START are constants (for 64-bit) whereas PHYSICAL_START is a variable (and KERNELBASE ideally should be too, but isn't yet). With this, relocatable kernels still copy themselves down to physical address 0 and run there. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/mmu-hash64.h | 2 +- arch/powerpc/include/asm/page.h | 14 ++++++++++---- arch/powerpc/include/asm/sections.h | 6 ++++++ 3 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index c2df53c5ceb9..5a441742ffba 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -437,7 +437,7 @@ typedef struct { }) #endif /* 1 */ -/* This is only valid for addresses >= KERNELBASE */ +/* This is only valid for addresses >= PAGE_OFFSET */ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) { if (ssize == MMU_SEGSIZE_256M) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index e088545cb3f5..64e144505f65 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -71,15 +71,21 @@ #define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET) #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START)) -#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM) +#if defined(CONFIG_RELOCATABLE) #ifndef __ASSEMBLY__ extern phys_addr_t memstart_addr; extern phys_addr_t kernstart_addr; #endif #define PHYSICAL_START kernstart_addr -#define MEMORY_START memstart_addr #else #define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START) +#endif + +#ifdef CONFIG_PPC64 +#define MEMORY_START 0UL +#elif defined(CONFIG_RELOCATABLE) +#define MEMORY_START memstart_addr +#else #define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE) #endif @@ -92,8 +98,8 @@ extern phys_addr_t kernstart_addr; #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE)) -#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE) 
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START)) +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) /* * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 7710e9e6660f..baf318aec533 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -16,6 +16,12 @@ static inline int in_kernel_text(unsigned long addr) return 0; } +static inline int overlaps_kernel_text(unsigned long start, unsigned long end) +{ + return start < (unsigned long)__init_end && + (unsigned long)_stext < end; +} + #undef dereference_function_descriptor void *dereference_function_descriptor(void *); -- cgit v1.2.1 From d6c93adbeb98c56e19f7df37633566b39fbcd4c9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 1 Sep 2008 11:23:30 +1000 Subject: powerpc: Use sys_pause for 32-bit pause entry point sys32_pause is a useless copy of the generic sys_pause. Signed-off-by: Christoph Hellwig Acked-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/systbl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index f6cc7a43b4fa..803def236654 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -32,7 +32,7 @@ COMPAT_SYS_SPU(stime) COMPAT_SYS(ptrace) SYSCALL_SPU(alarm) OLDSYS(fstat) -COMPAT_SYS(pause) +SYSCALL(pause) COMPAT_SYS(utime) SYSCALL(ni_syscall) SYSCALL(ni_syscall) -- cgit v1.2.1 From 967e012ef306e99cfddcd7423f37414e6b568361 Mon Sep 17 00:00:00 2001 From: Sebastien Dugue Date: Thu, 4 Sep 2008 22:37:07 +1000 Subject: powerpc: Separate the irq radix tree insertion and lookup irq_radix_revmap() currently serves 2 purposes, irq mapping lookup and insertion which happen in interrupt and process context respectively. Separate the function into its 2 components, one for lookup only and one for insertion only. Fix the only user of the revmap tree (XICS) to use the new functions. Also, move the insertion into the radix tree of those irqs that were requested before it was initialized at said tree initialization. Mutual exclusion between the tree initialization and readers/writers is handled via a state variable (revmap_trees_allocated) set to 1 when the tree has been initialized and set to 2 after the already requested irqs have been inserted in the tree by the init path. This state is checked before any reader or writer access just like we used to check for tree.gfp_mask != 0 before. Finally, now that we're not any longer inserting nodes into the radix-tree in interrupt context, turn the GFP_ATOMIC allocations into GFP_KERNEL ones. Signed-off-by: Sebastien Dugue Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/irq.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index a372f76836c2..0a5137676e1b 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -236,15 +236,27 @@ extern unsigned int irq_find_mapping(struct irq_host *host, extern unsigned int irq_create_direct_mapping(struct irq_host *host); /** - * irq_radix_revmap - Find a linux virq from a hw irq number. + * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping. 
+ * @host: host owning this hardware interrupt + * @virq: linux irq number + * @hwirq: hardware irq number in that host space + * + * This is for use by irq controllers that use a radix tree reverse + * mapping for fast lookup. + */ +extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, + irq_hw_number_t hwirq); + +/** + * irq_radix_revmap_lookup - Find a linux virq from a hw irq number. * @host: host owning this hardware interrupt * @hwirq: hardware irq number in that host space * * This is a fast path, for use by irq controller code that uses radix tree * revmaps */ -extern unsigned int irq_radix_revmap(struct irq_host *host, - irq_hw_number_t hwirq); +extern unsigned int irq_radix_revmap_lookup(struct irq_host *host, + irq_hw_number_t hwirq); /** * irq_linear_revmap - Find a linux virq from a hw irq number. -- cgit v1.2.1 From da654b74bda14c45a7d98c731bf3c1a43b6b74e2 Mon Sep 17 00:00:00 2001 From: Srinivasa Ds Date: Tue, 23 Sep 2008 15:23:52 +0530 Subject: signals: demultiplexing SIGTRAP signal Currently a SIGTRAP can denote any one of the reasons below. - Breakpoint hit - H/W debug register hit - Single step - Signal sent through kill() or raise() Architectures like powerpc/parisc provide infrastructure to demultiplex the SIGTRAP signal by passing down the information for receiving SIGTRAP through the si_code field of the siginfo_t structure. Here is an attempt to generalise this infrastructure by extending it to x86 and x86_64 archs. Signed-off-by: Srinivasa DS Cc: Roland McGrath Cc: akpm@linux-foundation.org Cc: paulus@samba.org Cc: linuxppc-dev@ozlabs.org Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/siginfo.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/siginfo.h b/arch/powerpc/include/asm/siginfo.h index 12f1bce037be..49495b0534ed 100644 --- a/arch/powerpc/include/asm/siginfo.h +++ b/arch/powerpc/include/asm/siginfo.h @@ -15,11 +15,6 @@ #include -/* - * SIGTRAP si_codes - */ -#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */ -#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */ #undef NSIGTRAP #define NSIGTRAP 4 -- cgit v1.2.1 From 8fae0353247530d2124b2419052fa6120462fa99 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 8 Sep 2008 09:09:54 +0000 Subject: powerpc: Drop archdata numa_node Use the struct device's numa_node instead; use accessor functions to get/set numa_node. Signed-off-by: Becky Bruce Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/device.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 228ab2a315b9..dfd504caccc1 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -16,9 +16,6 @@ struct dev_archdata { /* DMA operations on that device */ struct dma_mapping_ops *dma_ops; void *dma_data; - - /* NUMA node if applicable */ - int numa_node; }; #endif /* _ASM_POWERPC_DEVICE_H */ -- cgit v1.2.1 From 4fc665b88a79a45bae8bbf3a05563c27c7337c3d Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Fri, 12 Sep 2008 10:34:46 +0000 Subject: powerpc: Merge 32 and 64-bit dma code We essentially adopt the 64-bit dma code, with some changes to support 32-bit systems, including HIGHMEM. dma functions on 32-bit are now invoked via accessor functions which call the correct op for a device based on archdata dma_ops. If there is no archdata dma_ops, this defaults to dma_direct_ops. 
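A rough sketch of what that dispatch means from a driver's point of view (the device and buffer are hypothetical; the calls resolve through the get_dma_ops() accessor shown in the hunk below):

#include <linux/dma-mapping.h>

static void example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dispatches via get_dma_ops(dev); with no archdata dma_ops on
	 * 32-bit this now lands in dma_direct_ops rather than failing */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... kick off the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}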
In addition, the dma_map/unmap_page functions are added to dma_ops because we can't just fall back on map/unmap_single when HIGHMEM is enabled. In the case of dma_direct_*, we stop using map/unmap_single and just use the page version - this saves a lot of ugly ifdeffing. We leave map/unmap_single in the dma_ops definition, though, because they are needed by the iommu code, which does not implement map/unmap_page. Ideally, going forward, we will completely eliminate map/unmap_single and just have map/unmap_page, if it's workable for 64-bit. Signed-off-by: Becky Bruce Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/dma-mapping.h | 187 +++++++++++---------------------- arch/powerpc/include/asm/machdep.h | 5 +- arch/powerpc/include/asm/pci.h | 14 +-- 3 files changed, 72 insertions(+), 134 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index c7ca45f97dd2..fddb229bd74f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset, #endif /* ! CONFIG_NOT_COHERENT_CACHE */ -#ifdef CONFIG_PPC64 - static inline unsigned long device_to_mask(struct device *dev) { if (dev->dma_mask && *dev->dma_mask) @@ -76,8 +74,24 @@ struct dma_mapping_ops { struct dma_attrs *attrs); int (*dma_supported)(struct device *dev, u64 mask); int (*set_dma_mask)(struct device *dev, u64 dma_mask); + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs); + void (*unmap_page)(struct device *dev, + dma_addr_t dma_address, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs); }; +/* + * Available generic sets of operations + */ +#ifdef CONFIG_PPC64 +extern struct dma_mapping_ops dma_iommu_ops; +#endif +extern struct dma_mapping_ops dma_direct_ops; + static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) { /* We don't handle the NULL dev case for ISA for now. We could @@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) * only ISA DMA device we support is the floppy and we have a hack * in the floppy driver directly to get a device for us. */ - if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL)) + + if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) { +#ifdef CONFIG_PPC64 return NULL; +#else + /* Use default on 32-bit if dma_ops is not set up */ + /* TODO: Long term, we should fix drivers so that dev and + * archdata dma_ops are set up for all buses. + */ + return &dma_direct_ops; +#endif + } + return dev->archdata.dma_ops; } @@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } +/* + * TODO: map_/unmap_single will ideally go away, to be completely + * replaced by map/unmap_page. Until then, we allow dma_ops to have + * one or the other, or both by checking to see if the specific + * function requested exists; and if not, falling back on the other set. 
+ */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, @@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - return dma_ops->map_single(dev, cpu_addr, size, direction, attrs); + + if (dma_ops->map_single) + return dma_ops->map_single(dev, cpu_addr, size, direction, + attrs); + + return dma_ops->map_page(dev, virt_to_page(cpu_addr), + (unsigned long)cpu_addr % PAGE_SIZE, size, + direction, attrs); } static inline void dma_unmap_single_attrs(struct device *dev, @@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->unmap_single(dev, dma_addr, size, direction, attrs); + + if (dma_ops->unmap_single) { + dma_ops->unmap_single(dev, dma_addr, size, direction, attrs); + return; + } + + dma_ops->unmap_page(dev, dma_addr, size, direction, attrs); } static inline dma_addr_t dma_map_page_attrs(struct device *dev, @@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); + + if (dma_ops->map_page) + return dma_ops->map_page(dev, page, offset, size, direction, + attrs); + return dma_ops->map_single(dev, page_address(page) + offset, size, - direction, attrs); + direction, attrs); } static inline void dma_unmap_page_attrs(struct device *dev, @@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); + + if (dma_ops->unmap_page) { + dma_ops->unmap_page(dev, dma_address, size, direction, attrs); + return; + } + dma_ops->unmap_single(dev, dma_address, size, direction, attrs); } @@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL); } -/* - * Available generic sets of operations - */ -extern struct dma_mapping_ops dma_iommu_ops; -extern struct dma_mapping_ops dma_direct_ops; - -#else /* CONFIG_PPC64 */ - -#define dma_supported(dev, mask) (1) - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} - -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, - gfp_t gfp) -{ -#ifdef CONFIG_NOT_COHERENT_CACHE - return __dma_alloc_coherent(size, dma_handle, gfp); -#else - void *ret; - /* ignore region specifiers */ - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); - - if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; - - ret = (void *)__get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = virt_to_bus(ret); - } - - return ret; -#endif -} - -static inline void -dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ -#ifdef CONFIG_NOT_COHERENT_CACHE - __dma_free_coherent(size, vaddr); -#else - free_pages((unsigned long)vaddr, get_order(size)); -#endif -} - -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync(ptr, size, direction); - - return virt_to_bus(ptr); -} - -static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction) -{ - 
/* We do nothing. */ -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync_page(page, offset, size, direction); - - return page_to_bus(page) + offset; -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, - enum dma_data_direction direction) -{ - /* We do nothing. */ -} - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction direction) -{ - struct scatterlist *sg; - int i; - - BUG_ON(direction == DMA_NONE); - - for_each_sg(sgl, sg, nents, i) { - BUG_ON(!sg_page(sg)); - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); - sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset; - } - - return nents; -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nhwentries, - enum dma_data_direction direction) -{ - /* We don't do anything here. */ -} - -#endif /* CONFIG_PPC64 */ - static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 893aafd87fde..2740c44ff717 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -88,8 +88,6 @@ struct machdep_calls { unsigned long (*tce_get)(struct iommu_table *tbl, long index); void (*tce_flush)(struct iommu_table *tbl); - void (*pci_dma_dev_setup)(struct pci_dev *dev); - void (*pci_dma_bus_setup)(struct pci_bus *bus); void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, unsigned long flags); @@ -101,6 +99,9 @@ struct machdep_calls { #endif #endif /* CONFIG_PPC64 */ + void (*pci_dma_dev_setup)(struct pci_dev *dev); + void (*pci_dma_bus_setup)(struct pci_bus *bus); + int (*probe)(void); void (*setup_arch)(void); /* Optional, may be NULL */ void (*init_early)(void); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index a05a942b1c25..0e52c7828ea4 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 
15 : 14; } +#ifdef CONFIG_PCI +extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops); +extern struct dma_mapping_ops *get_pci_dma_ops(void); +#else /* CONFIG_PCI */ +#define set_pci_dma_ops(d) +#define get_pci_dma_ops() NULL +#endif + #ifdef CONFIG_PPC64 /* @@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) #define PCI_DISABLE_MWI #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops); -extern struct dma_mapping_ops *get_pci_dma_ops(void); - static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) @@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, *strat = PCI_DMA_BURST_MULTIPLE; *strategy_parameter = cacheline_size; } -#else /* CONFIG_PCI */ -#define set_pci_dma_ops(d) -#define get_pci_dma_ops() NULL #endif #else /* 32-bit */ -- cgit v1.2.1 From b9579689ad3a208c342aed806afc7a9a808c4e1e Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Fri, 12 Sep 2008 10:53:43 +0000 Subject: powerpc: Make dma_addr_t a u64 if CONFIG_PHYS_64BIT is set Signed-off-by: Becky Bruce Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index d3374bc865ba..a9a9262e84a3 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -55,7 +55,7 @@ typedef u64 phys_addr_t; typedef u32 phys_addr_t; #endif -#ifdef __powerpc64__ +#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT) typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; -- cgit v1.2.1 From 0ba3418b8b1c85ee1771c63f1dd12041614e56ff Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 15 Jul 2008 16:12:25 -0500 Subject: powerpc: Introduce local (non-broadcast) forms of tlb invalidates Introduced a new set of low level tlb invalidate functions that do not broadcast invalidates on the bus: _tlbil_all - invalidate all _tlbil_pid - invalidate based on process id (or mm context) _tlbil_va - invalidate based on virtual address (ea + pid) On non-SMP configs _tlbil_all should be functionally equivalent to _tlbia and _tlbil_va should be functionally equivalent to _tlbie. The intent of this change is to handle SMP based invalidates via IPIs instead of broadcasts as the mechanism scales better for larger number of cores. On e500 (fsl-booke mmu) based cores move to using MMUCSR for invalidate alls and tlbsx/tlbwe for invalidate virtual address. 
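The IPI plumbing itself is left to later patches; purely as a hypothetical sketch, an SMP implementation could fan a PID invalidate out to all cores along these lines (on_each_cpu() is the generic cross-call helper):

static void do_tlbil_pid_ipi(void *pid)
{
	_tlbil_pid(*(unsigned int *)pid);	/* local invalidate on this core */
}

static void example_tlbil_pid_all(unsigned int pid)
{
	/* run on every CPU, waiting for completion */
	on_each_cpu(do_tlbil_pid_ipi, &pid, 1);
}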
Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/reg_booke.h | 7 +++++++ arch/powerpc/include/asm/tlbflush.h | 13 ++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index be980f4ee495..67453766bff1 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -109,6 +109,7 @@ #define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */ #define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */ #define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */ +#define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */ #define SPRN_PIT 0x3DB /* Programmable Interval Timer */ #define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */ #define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */ @@ -410,6 +411,12 @@ #define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */ #define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */ +/* Bit definitions for MMUCSR0 */ +#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ +#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ +#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ +#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ + /* Bit definitions for SGR. */ #define SGR_NORMAL 0 /* Speculative fetching allowed. */ #define SGR_GUARDED 1 /* Speculative fetching disallowed. */ diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index 361cd5c7a32b..a2c6bfd85fb7 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h @@ -29,6 +29,9 @@ #include extern void _tlbie(unsigned long address, unsigned int pid); +extern void _tlbil_all(void); +extern void _tlbil_pid(unsigned int pid); +extern void _tlbil_va(unsigned long address, unsigned int pid); #if defined(CONFIG_40x) || defined(CONFIG_8xx) #define _tlbia() asm volatile ("tlbia; sync" : : : "memory") @@ -38,31 +41,31 @@ extern void _tlbia(void); static inline void flush_tlb_mm(struct mm_struct *mm) { - _tlbia(); + _tlbil_pid(mm->context.id); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { - _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0); + _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0); } static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long vmaddr) { - _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0); + flush_tlb_page(vma, vmaddr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - _tlbia(); + _tlbil_pid(vma->vm_mm->context.id); } static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - _tlbia(); + _tlbil_pid(0); } #elif defined(CONFIG_PPC32) -- cgit v1.2.1 From 9bf2b5cdc5fe5e3136ceeecc85141765023ded6e Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 16 Jul 2008 15:54:21 -0500 Subject: powerpc: Fixes for CONFIG_PTE_64BIT for SMP support There are some minor issues with supporting 64-bit PTEs on a 32-bit processor when dealing with SMP. * We need to order the stores in set_pte_at to make sure the flag word is set second (see the sketch below). * Change pte_clear to use pte_update so only the flag word is cleared. * Added a WARN_ON to set_pte_at to ensure the pte isn't present for the 64-bit pte/SMP case (to ensure our assumption of this fact). 
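To illustrate the first point (a sketch only; the real code is the inline asm in the __set_pte_at() hunk below, and smp_wmb() here stands in for the eieio used there):

static inline void example_set_pte_64(pte_t *ptep, pte_t pte)
{
	u32 *w = (u32 *)ptep;

	w[0] = pte_val(pte) >> 32;	/* physical-address word first */
	smp_wmb();			/* order the two stores (eieio) */
	w[1] = (u32)pte_val(pte);	/* flags word (_PAGE_PRESENT) last,
					 * so no CPU ever sees a present PTE
					 * with a half-written address */
}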
Signed-off-by: Kumar Gala Acked-by: Becky Bruce --- arch/powerpc/include/asm/highmem.h | 2 +- arch/powerpc/include/asm/pgtable-ppc32.h | 25 ++++++++++++++++++++----- 2 files changed, 21 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 5d99b6489d56..91c589520c0a 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(*(kmap_pte-idx))); #endif - set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); + __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); flush_tlb_page(NULL, vaddr); return (void*) vaddr; diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 6fe39e327047..1b1195555473 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -517,7 +517,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) -#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) +#define pte_clear(mm, addr, ptep) \ + do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) @@ -612,9 +613,6 @@ static inline unsigned long pte_update(pte_t *p, return old; } #else /* CONFIG_PTE_64BIT */ -/* TODO: Change that to only modify the low word and move set_pte_at() - * out of line - */ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, unsigned long set) @@ -652,16 +650,33 @@ static inline unsigned long long pte_update(pte_t *p, * On machines which use an MMU hash table we avoid changing the * _PAGE_HASHPTE bit. */ -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { #if _PAGE_HASHPTE != 0 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); +#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); #else *ptep = pte; #endif } +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ +#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) + WARN_ON(pte_present(*ptep)); +#endif + __set_pte_at(mm, addr, ptep, pte); +} + /* * 2.6 calls this without flushing the TLB entry; this is wrong * for our hash-based implementation, we fix that up here. -- cgit v1.2.1 From 9a62c05180ff55fdaa517370c6f077402820406c Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 31 Jul 2008 08:41:10 -0500 Subject: powerpc/mm: Implement _PAGE_SPECIAL & pte_special() for 32-bit Implement _PAGE_SPECIAL and pte_special() for 32-bit powerpc. This bit will be used by the fast get_user_pages() to differentiate PTEs that correspond to a valid struct page from special mappings that don't, such as IO mappings obtained via io_remap_pfn_range(). We currently only implement this on the sub-arches that support SMP or will do so in the future (6xx, 44x, FSL-BookE) and not on the others (8xx, 40x). 
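A hedged sketch of the check the fast path is expected to make (the helper name is hypothetical; the actual fast get_user_pages() arrives separately):

static int example_gup_pte_ok(pte_t pte)
{
	/* special mappings (e.g. from io_remap_pfn_range()) have no
	 * backing struct page, so a lockless walker must bail out
	 * and take the slow path */
	if (!pte_present(pte) || pte_special(pte))
		return 0;
	return 1;
}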
Signed-off-by: Kumar Gala Acked-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pgtable-ppc32.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 1b1195555473..c2e58b41bc78 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -261,6 +261,7 @@ extern int icache_44x_need_flush; #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */ #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ +#define _PAGE_SPECIAL 0x00000020 /* S: Special page */ #define _PAGE_USER 0x00000040 /* S: User page */ #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ #define _PAGE_GUARDED 0x00000100 /* H: G bit */ @@ -276,6 +277,7 @@ extern int icache_44x_need_flush; /* ERPN in a PTE never gets cleared, ignore it */ #define _PTE_NONE_MASK 0xffffffff00000000ULL +#define __HAVE_ARCH_PTE_SPECIAL #elif defined(CONFIG_FSL_BOOKE) /* @@ -305,6 +307,7 @@ extern int icache_44x_need_flush; #define _PAGE_COHERENT 0x00100 /* H: M bit */ #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ +#define _PAGE_SPECIAL 0x00800 /* S: Special page */ #ifdef CONFIG_PTE_64BIT /* ERPN in a PTE never gets cleared, ignore it */ @@ -315,6 +318,8 @@ extern int icache_44x_need_flush; #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) +#define __HAVE_ARCH_PTE_SPECIAL + #elif defined(CONFIG_8xx) /* Definitions for 8xx embedded chips. */ #define _PAGE_PRESENT 0x0001 /* Page is valid */ @@ -362,6 +367,7 @@ extern int icache_44x_need_flush; #define _PAGE_ACCESSED 0x100 /* R: page referenced */ #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */ #define _PAGE_RW 0x400 /* software: user write access allowed */ +#define _PAGE_SPECIAL 0x800 /* software: Special page */ #define _PTE_NONE_MASK _PAGE_HASHPTE @@ -372,6 +378,8 @@ extern int icache_44x_need_flush; /* Hash table based platforms need atomic updates of the linux PTE */ #define PTE_ATOMIC_UPDATES 1 +#define __HAVE_ARCH_PTE_SPECIAL + #endif /* @@ -404,6 +412,9 @@ extern int icache_44x_need_flush; #ifndef _PAGE_WRITETHRU #define _PAGE_WRITETHRU 0 #endif +#ifndef _PAGE_SPECIAL +#define _PAGE_SPECIAL 0 +#endif #ifndef _PMD_PRESENT_MASK #define _PMD_PRESENT_MASK _PMD_PRESENT #endif @@ -534,7 +545,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_special(pte_t pte) { return 0; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } @@ -553,7 +564,7 @@ static inline pte_t pte_mkdirty(pte_t pte) { static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkspecial(pte_t pte) { - return pte; } + pte_val(pte) |= _PAGE_SPECIAL; return pte; } static inline unsigned long pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; -- cgit v1.2.1 From 4ee7084eb11e00eb02dc8435fd18273a61ffa9bf Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: 
Wed, 24 Sep 2008 11:01:24 -0500 Subject: POWERPC: Allow 32-bit hashed pgtable code to support 36-bit physical This rearranges a bit of code, and adds support for 36-bit physical addressing for configs that use a hashed page table. The 36b physical support is not enabled by default on any config - it must be explicitly enabled via the config system. This patch *only* expands the page table code to accommodate large physical addresses on 32-bit systems and enables the PHYS_64BIT config option for 86xx. It does *not* allow you to boot a board with more than about 3.5GB of RAM - for that, SWIOTLB support is also required (and coming soon). Signed-off-by: Becky Bruce Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/io.h | 2 +- arch/powerpc/include/asm/page_32.h | 8 +++++++- arch/powerpc/include/asm/pgtable-ppc32.h | 17 +++++++++++++++-- 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 77c7fa025e65..08266d2728b3 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -711,7 +711,7 @@ static inline void * phys_to_virt(unsigned long address) /* * Change "struct page" to physical address. */ -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT) /* We do NOT want virtual merging, it would put too much pressure on * our iommu allocator. Instead, we want drivers to be smart enough diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index ebfae530a379..d77072a32cc6 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -13,10 +13,16 @@ #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES #endif +#ifdef CONFIG_PTE_64BIT +#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */ +#else +#define PTE_FLAGS_OFFSET 0 +#endif + #ifndef __ASSEMBLY__ /* * The basic type of a PTE - 64 bits for those CPUs with > 32 bit - * physical addressing. For now this just the IBM PPC440. + * physical addressing. */ #ifdef CONFIG_PTE_64BIT typedef unsigned long long pte_basic_t; diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index c2e58b41bc78..29c83d85b04f 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -369,7 +369,12 @@ extern int icache_44x_need_flush; #define _PAGE_RW 0x400 /* software: user write access allowed */ #define _PAGE_SPECIAL 0x800 /* software: Special page */ +#ifdef CONFIG_PTE_64BIT +/* We never clear the high word of the pte */ +#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE) +#else #define _PTE_NONE_MASK _PAGE_HASHPTE +#endif #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) @@ -587,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va, extern void add_hash_page(unsigned context, unsigned long va, unsigned long pmdval); +/* Flush an entry from the TLB/hash table */ +extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, + unsigned long address); + /* * Atomic PTE updates. 
* @@ -665,9 +674,13 @@ static inline unsigned long long pte_update(pte_t *p, static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { -#if _PAGE_HASHPTE != 0 +#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); #elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) +#if _PAGE_HASHPTE != 0 + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); +#endif __asm__ __volatile__("\ stw%U0%X0 %2,%0\n\ eieio\n\ @@ -675,7 +688,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); #else - *ptep = pte; + *ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE); #endif } -- cgit v1.2.1 From fbcc4bacee30cad4e4a13d05492a9ed0c9c3e8c7 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Thu, 4 Sep 2008 04:08:20 +0000 Subject: ibm_newemac: MAL support for PowerPC 405EZ The PowerPC 405EZ SoC has some differences in the interrupt layout and handling for the MAL. The SERR, TXDE, and RXDE interrupts are OR'd into a single interrupt. Also, due to the possibility of interrupt coalescing, the TXEOB and RXEOB interrupts require an interrupt bit to be cleared in the ICINTSTAT SDR. This sets the proper MAL feature bits for 405EZ boards, and adds a common shared handler for SERR, TXDE, and RXDE. The defines for the ICINTSTAT DCR are added to the proper header file as well. This has been adapted from code originally written by Stefan Roese. Signed-off-by: Josh Boyer Acked-by: Benjamin Herrenschmidt Acked-by: Jeff Garzik Signed-off-by: Josh Boyer --- arch/powerpc/include/asm/dcr-regs.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h index 29b0ecef980a..7b833ff9c14f 100644 --- a/arch/powerpc/include/asm/dcr-regs.h +++ b/arch/powerpc/include/asm/dcr-regs.h @@ -68,6 +68,13 @@ #define SDR0_UART3 0x0123 #define SDR0_CUST0 0x4000 +/* SDR for 405EZ */ +#define DCRN_SDR_ICINTSTAT 0x4510 +#define ICINTSTAT_ICRX 0x80000000 +#define ICINTSTAT_ICTX0 0x40000000 +#define ICINTSTAT_ICTX1 0x20000000 +#define ICINTSTAT_ICTX 0x60000000 + /* * All those DCR register addresses are offsets from the base address * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is -- cgit v1.2.1 From d2b194ed820880eb19c43b9c10d9f5f30026ee54 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 4 Jun 2008 02:59:29 -0500 Subject: powerpc/math-emu: Use kernel generic math-emu code The math emulation code is centered around a set of generic macros that provide the core of the emulation and are shared by the various architectures and other projects (like glibc). Each arch implements its own sfp-machine.h to specify various arch-specific details. For historic reasons that are now lost, the powerpc math-emu code had its own version of the common headers. This moves us to using the kernel generic version and thus getting fixes when those are updated. Also cleaned up exception/error reporting from the FP emulation functions.
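To make the macro-driven style concrete, here is a hedged sketch of a double-precision add emulation in the converted style (names such as FP_UNPACK_DP and __FP_PACK_D follow the generic math-emu headers and the sfp-machine.h below, but treat the exact spellings as approximations rather than quotes from this patch):

/* Sketch: emulate fadd frD,frA,frB via the generic soft-fp macros. */
int fadd(void *frD, void *frA, void *frB)
{
	FP_DECL_D(A);
	FP_DECL_D(B);
	FP_DECL_D(R);
	FP_DECL_EX;			/* accumulates exception flags */

	FP_UNPACK_DP(A, frA);		/* raw register image -> canonical */
	FP_UNPACK_DP(B, frB);
	FP_ADD_D(R, A, B);		/* the shared generic core */
	__FP_PACK_D(frD, R);		/* repack unless a trap is pending */

	return FP_CUR_EXCEPTIONS;	/* report flags to the caller */
}

The arch header below supplies exactly the pieces such a routine leans on: the word size, the mul/div "meat" macros, the NaN layout, and the FPSCR-based exception plumbing.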
Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/sfp-machine.h | 353 +++++++++++++++++++++++++++++++++ 1 file changed, 353 insertions(+) create mode 100644 arch/powerpc/include/asm/sfp-machine.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h new file mode 100644 index 000000000000..ced34f1dc8f8 --- /dev/null +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -0,0 +1,353 @@ +/* Machine-dependent software floating-point definitions. PPC version. + Copyright (C) 1997 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with the GNU C Library; see the file COPYING.LIB. If + not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + Actually, this is a PPC (32bit) version, written based on the + i386, sparc, and sparc64 versions, by me, + Peter Maydell (pmaydell@chiark.greenend.org.uk). + Comments are by and large also mine, although they may be inaccurate. + + In picking out asm fragments I've gone with the lowest common + denominator, which also happens to be the hardware I have :-> + That is, a SPARC without hardware multiply and divide. + */ + +/* basic word size definitions */ +#define _FP_W_TYPE_SIZE 32 +#define _FP_W_TYPE unsigned long +#define _FP_WS_TYPE signed long +#define _FP_I_TYPE long + +#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) +#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) + +/* You can optionally code some things like addition in asm. For + * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't + * then you get a fragment of C code [if you change an #ifdef 0 + * in op-2.h] or a call to add_ssaaaa (see below). + * Good places to look for asm fragments to use are gcc and glibc. + * gcc's longlong.h is useful. + */ + +/* We need to know how to multiply and divide. If the host word size + * is >= 2*fracbits you can use FP_MUL_MEAT_n_imm(t,R,X,Y) which + * codes the multiply with whatever gcc does to 'a * b'. + * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm + * function that can multiply two 1W values and get a 2W result. + * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which + * does bitshifting to avoid overflow. + * For division there is FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size + * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or + * _FP_DIV_HELP_ldiv (see op-1.h). + * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W). + * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd + * to do this.] + * In general, 'n' is the number of words required to hold the type, + * and 't' is either S, D or Q for single/double/quad. 
+ * -- PMM + */ +/* Example: SPARC64: + * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_imm(S,R,X,Y) + * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm) + * #define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm) + * + * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) + * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y) + * #define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv_64(Q,R,X,Y) + * + * Example: i386: + * #define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64) + * #define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64) + * + * #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32) + * #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv_64(D,R,X,Y) + */ + +#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) + +#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y) +#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) + +/* These macros define what NaN looks like. They're supposed to expand to + * a comma-separated set of 32bit unsigned ints that encode NaN. + */ +#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1) +#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1 +#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1 +#define _FP_NANSIGN_S 0 +#define _FP_NANSIGN_D 0 +#define _FP_NANSIGN_Q 0 + +#define _FP_KEEPNANFRACP 1 + +/* Exception flags. We use the bit positions of the appropriate bits + in the FPSCR, which also correspond to the FE_* bits. This makes + everything easier ;-). */ +#define FP_EX_INVALID (1 << (31 - 2)) +#define FP_EX_INVALID_SNAN EFLAG_VXSNAN +#define FP_EX_INVALID_ISI EFLAG_VXISI +#define FP_EX_INVALID_IDI EFLAG_VXIDI +#define FP_EX_INVALID_ZDZ EFLAG_VXZDZ +#define FP_EX_INVALID_IMZ EFLAG_VXIMZ +#define FP_EX_OVERFLOW (1 << (31 - 3)) +#define FP_EX_UNDERFLOW (1 << (31 - 4)) +#define FP_EX_DIVZERO (1 << (31 - 5)) +#define FP_EX_INEXACT (1 << (31 - 6)) + +/* This macro appears to be called when both X and Y are NaNs, and + * has to choose one and copy it to R. i386 goes for the larger of the + * two, sparc64 just picks Y. I don't understand this at all so I'll + * go with sparc64 because it's shorter :-> -- PMM + */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ + do { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R,Y); \ + R##_c = FP_CLS_NAN; \ + } while (0) + + +#include +#include + +#define __FPU_FPSCR (current->thread.fpscr.val) + +/* We only actually write to the destination register + * if exceptions signalled (if any) will not trap. 
+ */ +#define __FPU_ENABLED_EXC \ +({ \ + (__FPU_FPSCR >> 3) & 0x1f; \ +}) + +#define __FPU_TRAP_P(bits) \ + ((__FPU_ENABLED_EXC & (bits)) != 0) + +#define __FP_PACK_S(val,X) \ +({ int __exc = _FP_PACK_CANONICAL(S,1,X); \ + if(!__exc || !__FPU_TRAP_P(__exc)) \ + _FP_PACK_RAW_1_P(S,val,X); \ + __exc; \ +}) + +#define __FP_PACK_D(val,X) \ + do { \ + _FP_PACK_CANONICAL(D, 2, X); \ + if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \ + _FP_PACK_RAW_2_P(D, val, X); \ + } while (0) + +#define __FP_PACK_DS(val,X) \ + do { \ + FP_DECL_S(__X); \ + FP_CONV(S, D, 1, 2, __X, X); \ + _FP_PACK_CANONICAL(S, 1, __X); \ + if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) { \ + _FP_UNPACK_CANONICAL(S, 1, __X); \ + FP_CONV(D, S, 2, 1, X, __X); \ + _FP_PACK_CANONICAL(D, 2, X); \ + if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \ + _FP_PACK_RAW_2_P(D, val, X); \ + } \ + } while (0) + +/* Obtain the current rounding mode. */ +#define FP_ROUNDMODE \ +({ \ + __FPU_FPSCR & 0x3; \ +}) + +/* the asm fragments go here: all these are taken from glibc-2.0.5's + * stdlib/longlong.h + */ + +#include +#include + +/* add_ssaaaa is used in op-2.h and should be equivalent to + * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al)) + * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, + * high_addend_2, low_addend_2) adds two UWtype integers, composed by + * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2 + * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow + * (i.e. carry out) is not stored anywhere, and is lost. + */ +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%r" ((USItype)(ah)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%r" ((USItype)(ah)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + else \ + __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%r" ((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + } while (0) + +/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to + * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al)) + * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend, + * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers, + * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and + * LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE + * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, + * and is lost. 
+ */ +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (ah) && (ah) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else \ + __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + } while (0) + +/* asm fragments for mul and div */ + +/* umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two + * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype + * word product in HIGH_PROD and LOW_PROD. + */ +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhwu %0,%1,%2" \ + : "=r" ((USItype)(ph)) \ + : "%r" (__m0), \ + "r" (__m1)); \ + (pl) = __m0 * __m1; \ + } while (0) + +/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + * denominator) divides a UDWtype, composed by the UWtype integers + * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient + * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less + * than DENOMINATOR for correct operation. If, in addition, the most + * significant bit of DENOMINATOR must be 1, then the pre-processor symbol + * UDIV_NEEDS_NORMALIZATION is defined to 1. + */ +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { \ + UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ + __d1 = __ll_highpart (d); \ + __d0 = __ll_lowpart (d); \ + \ + __r1 = (n1) % __d1; \ + __q1 = (n1) / __d1; \ + __m = (UWtype) __q1 * __d0; \ + __r1 = __r1 * __ll_B | __ll_highpart (n0); \ + if (__r1 < __m) \ + { \ + __q1--, __r1 += (d); \ + if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */ \ + if (__r1 < __m) \ + __q1--, __r1 += (d); \ + } \ + __r1 -= __m; \ + \ + __r0 = __r1 % __d1; \ + __q0 = __r1 / __d1; \ + __m = (UWtype) __q0 * __d0; \ + __r0 = __r0 * __ll_B | __ll_lowpart (n0); \ + if (__r0 < __m) \ + { \ + __q0--, __r0 += (d); \ + if (__r0 >= (d)) \ + if (__r0 < __m) \ + __q0--, __r0 += (d); \ + } \ + __r0 -= __m; \ + \ + (q) = (UWtype) __q1 * __ll_B | __q0; \ + (r) = __r0; \ + } while (0) + +#define UDIV_NEEDS_NORMALIZATION 1 + +#define abort() \ + return 0 + +#ifdef __BIG_ENDIAN +#define __BYTE_ORDER __BIG_ENDIAN +#else +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +/* Exception flags. 
*/ +#define EFLAG_INVALID (1 << (31 - 2)) +#define EFLAG_OVERFLOW (1 << (31 - 3)) +#define EFLAG_UNDERFLOW (1 << (31 - 4)) +#define EFLAG_DIVZERO (1 << (31 - 5)) +#define EFLAG_INEXACT (1 << (31 - 6)) + +#define EFLAG_VXSNAN (1 << (31 - 7)) +#define EFLAG_VXISI (1 << (31 - 8)) +#define EFLAG_VXIDI (1 << (31 - 9)) +#define EFLAG_VXZDZ (1 << (31 - 10)) +#define EFLAG_VXIMZ (1 << (31 - 11)) +#define EFLAG_VXVC (1 << (31 - 12)) +#define EFLAG_VXSOFT (1 << (31 - 21)) +#define EFLAG_VXSQRT (1 << (31 - 22)) +#define EFLAG_VXCVI (1 << (31 - 23)) -- cgit v1.2.1 From 9e3cb29497561c846d0e7efc445731764d93c749 Mon Sep 17 00:00:00 2001 From: Victor Gallardo Date: Wed, 1 Oct 2008 23:37:57 -0700 Subject: ibm_newemac: Add support for GPCS, SGMII and M88E1112 PHY Add support for the phy types found on the Arches and other PowerPC 460 based boards. Signed-off-by: Victor Gallardo Acked-by: Benjamin Herrenschmidt Acked-by: Jeff Garzik Signed-off-by: Josh Boyer --- arch/powerpc/include/asm/dcr-regs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h index 7b833ff9c14f..828e3aa1f2fc 100644 --- a/arch/powerpc/include/asm/dcr-regs.h +++ b/arch/powerpc/include/asm/dcr-regs.h @@ -75,6 +75,10 @@ #define ICINTSTAT_ICTX1 0x20000000 #define ICINTSTAT_ICTX 0x60000000 +/* SDRs (460EX/460GT) */ +#define SDR0_ETH_CFG 0x4103 +#define SDR0_ETH_CFG_ECS 0x00000100 /* EMAC int clk source */ + /* * All those DCR register addresses are offsets from the base address * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is -- cgit v1.2.1 From 6fbc779c03591ee536fef9efb7d7e20f281d0b5c Mon Sep 17 00:00:00 2001 From: Victor Gallardo Date: Thu, 18 Sep 2008 12:41:26 +0000 Subject: ibm_newemac: Fix EMAC soft reset on 460EX/GT This patch fixes EMAC soft reset on 460EX/GT when no external clock is available. Signed-off-by: Victor Gallardo Signed-off-by: David S. Miller --- arch/powerpc/include/asm/dcr-regs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h index 29b0ecef980a..f15296cf3598 100644 --- a/arch/powerpc/include/asm/dcr-regs.h +++ b/arch/powerpc/include/asm/dcr-regs.h @@ -68,6 +68,10 @@ #define SDR0_UART3 0x0123 #define SDR0_CUST0 0x4000 +/* SDRs (460EX/460GT) */ +#define SDR0_ETH_CFG 0x4103 +#define SDR0_ETH_CFG_ECS 0x00000100 /* EMAC int clk source */ + /* * All those DCR register addresses are offsets from the base address * for the SRAM0 controller (e.g. 0x20 on 440GX). The base address is -- cgit v1.2.1 From 78b5b626fa9048337530cc3ba4ceb9e4ee96a386 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Fri, 10 Oct 2008 09:44:33 +0000 Subject: powerpc: Make ppc32 respect the boot cpu id for !CONFIG_SMP Previously the FDT header field boot_cpuid_phys wasn't actually used on ppc32. Instead the physical boot cpuid was assumed to be 0 for !CONFIG_SMP. 
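As a rough sketch of the resulting flow (the hook name is hypothetical, and the FDT header access is an assumption about the era's early-boot code, which runs big-endian so no byte swap is shown):

/* Sketch: early boot records the boot cpu's physical id from the
 * flattened device tree header; the UP hard_smp_processor_id() then
 * returns that value instead of a hard-coded 0. */
static void __init record_boot_cpuid(struct boot_param_header *fdt)
{
	set_hard_smp_processor_id(0, fdt->boot_cpuid_phys);
}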
Signed-off-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/smp.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index c092f84302fd..1866cec4f967 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -93,7 +93,7 @@ extern void __cpu_die(unsigned int cpu); #else /* for UP */ -#define hard_smp_processor_id() 0 +#define hard_smp_processor_id() get_hard_smp_processor_id(0) #define smp_setup_cpu_maps() #endif /* CONFIG_SMP */ @@ -122,6 +122,7 @@ static inline int get_hard_smp_processor_id(int cpu) static inline void set_hard_smp_processor_id(int cpu, int phys) { + boot_cpuid_phys = phys; } #endif /* !CONFIG_SMP */ #endif /* !CONFIG_PPC64 */ -- cgit v1.2.1 From c0da99d5f7b0349cb11f970b3283c0d57beb5ec9 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Thu, 9 Oct 2008 04:32:59 +0400 Subject: powerpc: fix fsl_upm nand driver modular build The fsl_upm nand driver fails to build because fsl_lbc_lock isn't exported; the lock is needed by the inlined fsl_upm_run_pattern() function: ERROR: "fsl_lbc_lock" [drivers/mtd/nand/fsl_upm.ko] undefined! Dave Jones proposed to export the lock, but it is better to just uninline fsl_upm_run_pattern(). When uninlined we also no longer need the exported fsl_lbc_regs, and both fsl_lbc_lock and fsl_lbc_regs could be marked static. While at it, also add some missing includes that we should have included explicitly. Reported-by: Dave Jones Signed-off-by: Anton Vorontsov Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/fsl_lbc.h | 48 ++++---------------------------------- 1 file changed, 4 insertions(+), 44 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 303f5484c050..63a4f779f531 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -23,9 +23,9 @@ #ifndef __ASM_FSL_LBC_H #define __ASM_FSL_LBC_H +#include #include -#include -#include +#include struct fsl_lbc_bank { __be32 br; /**< Base Register */ @@ -227,9 +227,6 @@ struct fsl_lbc_regs { u8 res8[0xF00]; }; -extern struct fsl_lbc_regs __iomem *fsl_lbc_regs; -extern spinlock_t fsl_lbc_lock; - /* * FSL UPM routines */ @@ -268,44 +265,7 @@ static inline void fsl_upm_end_pattern(struct fsl_upm *upm) cpu_relax(); } -/** - * fsl_upm_run_pattern - actually run an UPM pattern - * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find - * @io_base: remapped pointer to where memory access should happen - * @mar: MAR register content during pattern execution - * - * This function triggers dummy write to the memory specified by the io_base, - * thus UPM pattern actually executed. Note that mar usage depends on the - * pre-programmed AMX bits in the UPM RAM.
- */ -static inline int fsl_upm_run_pattern(struct fsl_upm *upm, - void __iomem *io_base, u32 mar) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&fsl_lbc_lock, flags); - - out_be32(&fsl_lbc_regs->mar, mar << (32 - upm->width)); - - switch (upm->width) { - case 8: - out_8(io_base, 0x0); - break; - case 16: - out_be16(io_base, 0x0); - break; - case 32: - out_be32(io_base, 0x0); - break; - default: - ret = -EINVAL; - break; - } - - spin_unlock_irqrestore(&fsl_lbc_lock, flags); - - return ret; -} +extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, + u32 mar); #endif /* __ASM_FSL_LBC_H */ -- cgit v1.2.1 From f5ea64dcbad89875d130596df14c9b25d994a737 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Sun, 12 Oct 2008 17:54:24 +0000 Subject: powerpc: Get USE_STRICT_MM_TYPECHECKS working again The typesafe version of the powerpc pagetable handling (with USE_STRICT_MM_TYPECHECKS defined) has bitrotted again. This patch makes a bunch of small fixes to get it back to building status. It's still not enabled by default as gcc still generates worse code with it for some reason. Signed-off-by: David Gibson Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mman.h | 2 +- arch/powerpc/include/asm/pgtable-ppc32.h | 17 +++++++++-------- arch/powerpc/include/asm/pgtable-ppc64.h | 12 ++++++------ 3 files changed, 16 insertions(+), 15 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 9209f755763e..e7b99bac9f48 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { - return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0; + return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 29c83d85b04f..6ab7c67cb5ab 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h @@ -431,11 +431,11 @@ extern int icache_44x_need_flush; #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) -#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ - _PAGE_WRITETHRU | _PAGE_ENDIAN | \ - _PAGE_USER | _PAGE_ACCESSED | \ - _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ - _PAGE_EXEC | _PAGE_HWEXEC) +#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU | _PAGE_ENDIAN | \ + _PAGE_USER | _PAGE_ACCESSED | \ + _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ + _PAGE_EXEC | _PAGE_HWEXEC) /* * Note: the _PAGE_COHERENT bit automatically gets set in the hardware * PTE if CONFIG_SMP is defined (hash_page does this); there is no need @@ -570,9 +570,9 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } -static inline unsigned long pte_pgprot(pte_t pte) +static inline pgprot_t pte_pgprot(pte_t pte) { - return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; + return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) @@ -688,7 +688,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); #else - *ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE); + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); #endif } diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 4597c491e9b5..4c0a8c62859d 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -117,10 +117,10 @@ #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) #define HAVE_PAGE_AGP -#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \ - _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ - _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) +#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \ + _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ + _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \ + _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) /* PTEIDX nibble */ #define _PTEIDX_SECONDARY 0x8 #define _PTEIDX_GROUP_IX 0x7 @@ -264,9 +264,9 @@ static inline pte_t pte_mkhuge(pte_t pte) { return pte; } static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } -static inline unsigned long pte_pgprot(pte_t pte) +static inline pgprot_t pte_pgprot(pte_t pte) { - return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; + return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } /* Atomic PTE updates */ -- cgit v1.2.1 From 6a0ab738ef42d87951b3980f61b1f4cbb14d4171 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Fri, 25 Jul 2008 13:54:49 -0500 Subject: KVM: ppc: guest breakpoint support Allow host userspace to program hardware debug registers to set breakpoints inside guests. 
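One plausible shape for the host side, sketched under the assumption that the usual Book E debug SPR names (SPRN_DBCR0, SPRN_IAC1, ...) are used and that the save happens on the guest-entry path:

/* Sketch: stash the host's debug state in the vcpu so guest-owned
 * breakpoints can be loaded, then restored for the host on exit. */
static void kvmppc_save_host_debug_state(struct kvm_vcpu *vcpu)
{
	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
}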
Signed-off-by: Jerone Young Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 2655e2a4831e..23bad40b0ea6 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -86,6 +86,11 @@ struct kvm_vcpu_arch { u32 host_stack; u32 host_pid; + u32 host_dbcr0; + u32 host_dbcr1; + u32 host_dbcr2; + u32 host_iac[4]; + u32 host_msr; u64 fpr[32]; u32 gpr[32]; -- cgit v1.2.1 From 20754c2495a791b5b429c0da63394c86ade978e7 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Fri, 25 Jul 2008 13:54:51 -0500 Subject: KVM: ppc: Stop saving host TLB state We're saving the host TLB state to memory on every exit, but never using it. Originally I had thought that we'd want to restore host TLB for heavyweight exits, but that could actually hurt when context switching to an unrelated host process (i.e. not qemu). Since this decreases the performance penalty of all exits, this patch improves guest boot time by about 15%. Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 23bad40b0ea6..dc3a7562bae4 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -81,8 +81,6 @@ struct kvm_vcpu_arch { struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; /* Pages which are referenced in the shadow TLB. */ struct page *shadow_pages[PPC44x_TLB_SIZE]; - /* Copy of the host's TLB. */ - struct tlbe host_tlb[PPC44x_TLB_SIZE]; u32 host_stack; u32 host_pid; -- cgit v1.2.1 From 83aae4a8098eb8a40a2e9dab3714354182143b4f Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Fri, 25 Jul 2008 13:54:52 -0500 Subject: KVM: ppc: Write only modified shadow entries into the TLB on exit Track which TLB entries need to be written, instead of overwriting everything below the high water mark. Typically only a single guest TLB entry will be modified in a single exit. Guest boot time performance improvement: about 15%. Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 3 +++ arch/powerpc/include/asm/kvm_ppc.h | 3 +++ 2 files changed, 6 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dc3a7562bae4..4338b03da8f9 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -82,6 +82,9 @@ struct kvm_vcpu_arch { /* Pages which are referenced in the shadow TLB. */ struct page *shadow_pages[PPC44x_TLB_SIZE]; + /* Track which TLB entries we've modified in the current exit. 
*/ + u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; + u32 host_stack; u32 host_pid; u32 host_dbcr0; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index a8b068792260..8e7e42959903 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -65,6 +65,9 @@ extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, gva_t eend, u32 asid); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); +/* XXX Book E specific */ +extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); + extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) -- cgit v1.2.1 From 49dd2c492895828a90ecdf889e7fe9cfb40a82a7 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Fri, 25 Jul 2008 13:54:53 -0500 Subject: KVM: powerpc: Map guest userspace with TID=0 mappings When we use TID=N userspace mappings, we must ensure that kernel mappings have been destroyed when entering userspace. Using TID=1/TID=0 for kernel/user mappings and running userspace with PID=0 means that userspace can't access the kernel mappings, but the kernel can directly access userspace. The net is that we don't need to flush the TLB on privilege switches, but we do on guest context switches (which are far more infrequent). Guest boot time performance improvement: about 30%. Signed-off-by: Hollis Blanchard Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 4 ++++ arch/powerpc/include/asm/kvm_ppc.h | 9 +++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 4338b03da8f9..34b52b7180cd 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -129,7 +129,11 @@ struct kvm_vcpu_arch { u32 ivor[16]; u32 ivpr; u32 pir; + + u32 shadow_pid; u32 pid; + u32 swap_pid; + u32 pvr; u32 ccr0; u32 ccr1; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 8e7e42959903..8931ba729d2b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -64,6 +64,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, gva_t eend, u32 asid); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); +extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); /* XXX Book E specific */ extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); @@ -95,4 +96,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) kvm_vcpu_block(vcpu); } +static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) +{ + if (vcpu->arch.pid != new_pid) { + vcpu->arch.pid = new_pid; + vcpu->arch.swap_pid = 1; + } +} + #endif /* __POWERPC_KVM_PPC_H__ */ -- cgit v1.2.1 From 0b59268285ca6cdc46191f2995bf632088e3e277 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 16 Oct 2008 15:39:57 +0200 Subject: [PATCH] remove unused ibcs2/PER_SVR4 in SET_PERSONALITY The SET_PERSONALITY macro is always called with a second argument of 0. Remove the ibcs argument and the various tests to set the PER_SVR4 personality. 
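The matching caller-side change is then a one-line cleanup per call site; roughly (illustrative, with fs/binfmt_elf.c as the main caller):

-	SET_PERSONALITY(loc->elf_ex, 0);
+	SET_PERSONALITY(loc->elf_ex);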
Signed-off-by: Martin Schwidefsky --- arch/powerpc/include/asm/elf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 64c6ee22eefd..d812929390e4 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -232,7 +232,7 @@ typedef elf_vrregset_t elf_fpxregset_t; #endif /* __powerpc64__ */ #ifdef __powerpc64__ -# define SET_PERSONALITY(ex, ibcs2) \ +# define SET_PERSONALITY(ex) \ do { \ unsigned long new_flags = 0; \ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ @@ -256,7 +256,7 @@ do { \ # define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ (exec_stk != EXSTACK_DISABLE_X) : 0) #else -# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +# define SET_PERSONALITY(ex) set_personality(PER_LINUX) #endif /* __powerpc64__ */ extern int dcache_bsize; -- cgit v1.2.1 From 756ba83ee370fbf62643777e7ba4a4f05932f6fb Mon Sep 17 00:00:00 2001 From: Masakazu Mokuno Date: Mon, 20 Oct 2008 08:03:33 +0200 Subject: ps3: Add ps3av_audio_mute_analog() Add support for muting the analog output so that it does not emit noise while non-PCM data is being played. Signed-off-by: Masakazu Mokuno Signed-off-by: Takashi Iwai --- arch/powerpc/include/asm/ps3av.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index fda98715cd35..d30bde2b11ef 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -735,6 +735,7 @@ extern int ps3av_get_mode(void); extern int ps3av_video_mode2res(u32, u32 *, u32 *); extern int ps3av_video_mute(int); extern int ps3av_audio_mute(int); +extern int ps3av_audio_mute_analog(int); extern int ps3av_dev_open(void); extern int ps3av_dev_close(void); extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data), -- cgit v1.2.1 From 64931a4be03dbc49bd50d10d211592cf98b523bb Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 20 Oct 2008 08:05:10 +0200 Subject: ps3: Add passthru support for non-audio streams Add support for setting the channel status bits so that a non-PCM data stream can be sent (i.e. passed through) via SPDIF/HDMI. Signed-off-by: Masakazu Mokuno Acked-by: Geert Uytterhoeven Signed-off-by: Takashi Iwai --- arch/powerpc/include/asm/ps3av.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index d30bde2b11ef..5aa22cffdbd6 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -678,6 +678,8 @@ struct ps3av_pkt_avb_param { u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE]; }; +/* channel status */ +extern u8 ps3av_mode_cs_info[]; /** command status **/ #define PS3AV_STATUS_SUCCESS 0x0000 /* success */ -- cgit v1.2.1
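For illustration, a consumer of the newly exported array could flag a compressed stream by setting the IEC958 non-audio bit in channel-status byte 0 (a sketch: the helper is hypothetical, though IEC958_AES0_NONAUDIO is the standard ALSA define from sound/asoundef.h):

/* Sketch: mark the outgoing stream as non-audio so receivers do not
 * try to render compressed data as PCM samples. */
static void ps3av_set_nonaudio(int nonaudio)
{
	if (nonaudio)
		ps3av_mode_cs_info[0] |= IEC958_AES0_NONAUDIO;
	else
		ps3av_mode_cs_info[0] &= ~IEC958_AES0_NONAUDIO;
}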