Diffstat (limited to 'arch/alpha')
25 files changed, 1253 insertions, 226 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 75291fdd379f..b9647bb66d13 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -47,13 +47,8 @@ config GENERIC_CALIBRATE_DELAY bool default y -config GENERIC_TIME - bool - default y - -config ARCH_USES_GETTIMEOFFSET - bool - default y +config GENERIC_CMOS_UPDATE + def_bool y config ZONE_DMA bool @@ -62,6 +57,9 @@ config ZONE_DMA config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config GENERIC_ISA_DMA bool default y diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 610dff44d94b..e756d04b6cd5 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -17,8 +17,8 @@ #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) -#define atomic_read(v) ((v)->counter + 0) -#define atomic64_read(v) ((v)->counter + 0) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) #define atomic64_set(v,i) ((v)->counter = (i)) diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index 15f3ae25c511..adfab8a21dfe 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -405,29 +405,31 @@ static inline int fls(int x) #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) /* Whee. EV67 can calculate it directly. */ -static inline unsigned long hweight64(unsigned long w) +static inline unsigned long __arch_hweight64(unsigned long w) { return __kernel_ctpop(w); } -static inline unsigned int hweight32(unsigned int w) +static inline unsigned int __arch_hweight32(unsigned int w) { - return hweight64(w); + return __arch_hweight64(w); } -static inline unsigned int hweight16(unsigned int w) +static inline unsigned int __arch_hweight16(unsigned int w) { - return hweight64(w & 0xffff); + return __arch_hweight64(w & 0xffff); } -static inline unsigned int hweight8(unsigned int w) +static inline unsigned int __arch_hweight8(unsigned int w) { - return hweight64(w & 0xff); + return __arch_hweight64(w & 0xff); } #else -#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/arch_hweight.h> #endif +#include <asm-generic/bitops/const_hweight.h> + #endif /* __KERNEL__ */ #include <asm-generic/bitops/find.h> @@ -436,22 +438,20 @@ static inline unsigned int hweight8(unsigned int w) /* * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is set. + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is set. */ static inline unsigned long -sched_find_first_bit(unsigned long b[3]) +sched_find_first_bit(const unsigned long b[2]) { - unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; - unsigned long ofs; + unsigned long b0, b1, ofs, tmp; - ofs = (b1 ? 64 : 128); - b1 = (b1 ? b1 : b2); - ofs = (b0 ? 0 : ofs); - b0 = (b0 ? b0 : b1); + b0 = b[0]; + b1 = b[1]; + ofs = (b0 ? 0 : 64); + tmp = (b0 ? b0 : b1); - return __ffs(b0) + ofs; + return __ffs(tmp) + ofs; }
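The rewritten sched_find_first_bit() above replaces the old three-word scan with two conditional selects that the compiler can lower to Alpha conditional moves, so the common case runs without a taken branch. A minimal userspace sketch of the same two-word search, using GCC's __builtin_ctzl where the kernel uses __ffs (the helper name and test values are illustrative, not from the patch):

#include <assert.h>
#include <stdio.h>

/* Branch-free first-set-bit search over a 100-bit bitmap held in two
   64-bit words; like sched_find_first_bit(), it assumes at least one
   bit is set. */
static unsigned long find_first_bit100(const unsigned long b[2])
{
        unsigned long ofs = b[0] ? 0 : 64;      /* which word holds the bit */
        unsigned long tmp = b[0] ? b[0] : b[1]; /* select the nonzero word */
        return (unsigned long)__builtin_ctzl(tmp) + ofs;
}

int main(void)
{
        unsigned long map[2] = { 0, 1UL << 35 }; /* bit 64 + 35 = 99 */
        assert(find_first_bit100(map) == 99);
        printf("first set bit: %lu\n", find_first_bit100(map));
        return 0;
}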
#include <asm-generic/bitops/ext2-non-atomic.h> diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 1bce8169733c..4567aca6fdd6 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -41,9 +41,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) #define dma_cache_sync(dev, va, size, dir) ((void)0) -#define dma_get_cache_alignment() L1_CACHE_BYTES #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/include/asm/hw_irq.h b/arch/alpha/include/asm/hw_irq.h index a37db0f95092..5050ac81cd90 100644 --- a/arch/alpha/include/asm/hw_irq.h +++ b/arch/alpha/include/asm/hw_irq.h @@ -3,6 +3,7 @@ extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); #ifdef CONFIG_ALPHA_GENERIC #define ACTUAL_NR_IRQS alpha_mv.nr_irqs diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h index 67bb9f6fdbe4..59617c3c2be6 100644 --- a/arch/alpha/include/asm/ioctls.h +++ b/arch/alpha/include/asm/ioctls.h @@ -80,6 +80,7 @@ # define TIOCPKT_START 8 # define TIOCPKT_NOSTOP 16 # define TIOCPKT_DOSTOP 32 +# define TIOCPKT_IOCTL 64 #define TIOCNOTTY 0x5422 @@ -91,6 +92,7 @@ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 @@ -106,7 +108,5 @@ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #endif /* _ASM_ALPHA_IOCTLS_H */ diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h new file mode 100644 index 000000000000..36c93b5cc239 --- /dev/null +++ b/arch/alpha/include/asm/local64.h @@ -0,0 +1 @@ +#include <asm-generic/local64.h> diff --git a/arch/alpha/include/asm/md.h b/arch/alpha/include/asm/md.h deleted file mode 100644 index 6c9b8222a4f2..000000000000 --- a/arch/alpha/include/asm/md.h +++ /dev/null @@ -1,13 +0,0 @@ -/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $ - * md.h: High speed xor_block operation for RAID4/5 - * - */ - -#ifndef __ASM_MD_H - -#define __ASM_MD_H - -/* #define HAVE_ARCH_XORBLOCK */ - -#define MD_XORBLOCK_ALIGNMENT sizeof(long) - -#endif /* __ASM_MD_H */ diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h index 3bef8522017c..4157cd3c44a9 100644 --- a/arch/alpha/include/asm/perf_event.h +++ b/arch/alpha/include/asm/perf_event.h @@ -2,8 +2,14 @@ #define __ASM_ALPHA_PERF_EVENT_H /* Alpha only supports software events through this interface.
*/ -static inline void set_perf_event_pending(void) { } +extern void set_perf_event_pending(void); #define PERF_EVENT_INDEX_OFFSET 0 +#ifdef CONFIG_PERF_EVENTS +extern void init_hw_perf_events(void); +#else +static inline void init_hw_perf_events(void) { } +#endif + #endif /* __ASM_ALPHA_PERF_EVENT_H */ diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h index 440747ca6349..017d7471c3c4 100644 --- a/arch/alpha/include/asm/scatterlist.h +++ b/arch/alpha/include/asm/scatterlist.h @@ -1,25 +1,6 @@ #ifndef _ALPHA_SCATTERLIST_H #define _ALPHA_SCATTERLIST_H -#include <asm/page.h> -#include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - - unsigned int length; - - dma_addr_t dma_address; - __u32 dma_length; -}; - -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->dma_length) - -#define ISA_DMA_THRESHOLD (~0UL) +#include <asm-generic/scatterlist.h> #endif /* !(_ALPHA_SCATTERLIST_H) */ diff --git a/arch/alpha/include/asm/termbits.h b/arch/alpha/include/asm/termbits.h index ad854a4a3af6..879dd3589921 100644 --- a/arch/alpha/include/asm/termbits.h +++ b/arch/alpha/include/asm/termbits.h @@ -180,6 +180,7 @@ struct ktermios { #define FLUSHO 0x00800000 #define PENDIN 0x20000000 #define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 /* Values for the ACTION argument to `tcflow'. */ #define TCOOFF 0 diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index b3e888638bb7..6f32f9c84a2d 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -77,7 +77,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ #define TIF_UAC_NOFIX 11 #define TIF_UAC_SIGBUS 12 -#define TIF_MEMDIE 13 +#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ #define TIF_FREEZE 16 /* is freezing for suspend */ diff --git a/arch/alpha/include/asm/wrperfmon.h b/arch/alpha/include/asm/wrperfmon.h new file mode 100644 index 000000000000..319bf6788d87 --- /dev/null +++ b/arch/alpha/include/asm/wrperfmon.h @@ -0,0 +1,93 @@ +/* + * Definitions for use with the Alpha wrperfmon PAL call. */ + +#ifndef __ALPHA_WRPERFMON_H +#define __ALPHA_WRPERFMON_H +
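The shift and mask constants that follow describe where each counter's count field sits inside the single PCTR value that the wrperfmon PALcall reads and writes. As a standalone sketch of how one shift/mask pair is meant to be combined (this mirrors what alpha_read_pmc() does later in the series; the helper below is hypothetical, not part of the patch):

#include <stdio.h>

#define EV67_PCTR_0_COUNT_SHIFT 28
#define EV67_PCTR_0_COUNT_MASK  0xfffffUL

/* Extract counter 0's 20-bit count field from a raw PCTR value:
   shift the field down to bit 0, then mask off the other fields. */
static unsigned long pctr0_count(unsigned long pctr)
{
        return (pctr >> EV67_PCTR_0_COUNT_SHIFT) & EV67_PCTR_0_COUNT_MASK;
}

int main(void)
{
        unsigned long raw = 0x12345UL << EV67_PCTR_0_COUNT_SHIFT;
        printf("PCTR0 count = 0x%lx\n", pctr0_count(raw)); /* 0x12345 */
        return 0;
}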
+/* Following commands are implemented on all CPUs */ +#define PERFMON_CMD_DISABLE 0 +#define PERFMON_CMD_ENABLE 1 +#define PERFMON_CMD_DESIRED_EVENTS 2 +#define PERFMON_CMD_LOGGING_OPTIONS 3 +/* Following commands on EV5/EV56/PCA56 only */ +#define PERFMON_CMD_INT_FREQ 4 +#define PERFMON_CMD_ENABLE_CLEAR 7 +/* Following commands are on EV5 and better CPUs */ +#define PERFMON_CMD_READ 5 +#define PERFMON_CMD_WRITE 6 +/* Following commands are on EV6 and better CPUs */ +#define PERFMON_CMD_ENABLE_WRITE 7 +/* Following commands are on EV67 and better CPUs */ +#define PERFMON_CMD_I_STAT 8 +#define PERFMON_CMD_PMPC 9 + + +/* EV5/EV56/PCA56 Counters */ +#define EV5_PCTR_0 (1UL<<0) +#define EV5_PCTR_1 (1UL<<1) +#define EV5_PCTR_2 (1UL<<2) + +#define EV5_PCTR_0_COUNT_SHIFT 48 +#define EV5_PCTR_1_COUNT_SHIFT 32 +#define EV5_PCTR_2_COUNT_SHIFT 16 + +#define EV5_PCTR_0_COUNT_MASK 0xffffUL +#define EV5_PCTR_1_COUNT_MASK 0xffffUL +#define EV5_PCTR_2_COUNT_MASK 0x3fffUL + +/* EV6 Counters */ +#define EV6_PCTR_0 (1UL<<0) +#define EV6_PCTR_1 (1UL<<1) + +#define EV6_PCTR_0_COUNT_SHIFT 28 +#define EV6_PCTR_1_COUNT_SHIFT 6 + +#define EV6_PCTR_0_COUNT_MASK 0xfffffUL +#define EV6_PCTR_1_COUNT_MASK 0xfffffUL + +/* EV67 (and subsequent) counters */ +#define EV67_PCTR_0 (1UL<<0) +#define EV67_PCTR_1 (1UL<<1) + +#define EV67_PCTR_0_COUNT_SHIFT 28 +#define EV67_PCTR_1_COUNT_SHIFT 6 + +#define EV67_PCTR_0_COUNT_MASK 0xfffffUL +#define EV67_PCTR_1_COUNT_MASK 0xfffffUL + + +/* + * The Alpha Architecture Handbook, vers. 4 (1998) appears to have a misprint + * in Table E-23 regarding the bits that set the event PCTR 1 counts. + * Hopefully what we have here is correct. + */ +#define EV6_PCTR_0_EVENT_MASK 0x10UL +#define EV6_PCTR_1_EVENT_MASK 0x0fUL + +/* EV6 Events */ +#define EV6_PCTR_0_CYCLES (0UL << 4) +#define EV6_PCTR_0_INSTRUCTIONS (1UL << 4) + +#define EV6_PCTR_1_CYCLES 0 +#define EV6_PCTR_1_BRANCHES 1 +#define EV6_PCTR_1_BRANCH_MISPREDICTS 2 +#define EV6_PCTR_1_DTB_SINGLE_MISSES 3 +#define EV6_PCTR_1_DTB_DOUBLE_MISSES 4 +#define EV6_PCTR_1_ITB_MISSES 5 +#define EV6_PCTR_1_UNALIGNED_TRAPS 6 +#define EV6_PCTR_1_REPLY_TRAPS 7 + +/* From the Alpha Architecture Reference Manual, 4th edn., 2002 */ +#define EV67_PCTR_MODE_MASK 0x10UL +#define EV67_PCTR_EVENT_MASK 0x0CUL + +#define EV67_PCTR_MODE_PROFILEME (1UL<<4) +#define EV67_PCTR_MODE_AGGREGATE (0UL<<4) + +#define EV67_PCTR_INSTR_CYCLES (0UL<<2) +#define EV67_PCTR_CYCLES_UNDEF (1UL<<2) +#define EV67_PCTR_INSTR_BCACHEMISS (2UL<<2) +#define EV67_PCTR_CYCLES_MBOX (3UL<<2) + +#endif diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 7739a62440a7..1ee9b5b629b8 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o obj-$(CONFIG_SRM_ENV) += srm_env.o obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifdef CONFIG_ALPHA_GENERIC @@ -35,7 +36,7 @@ endif obj-y += irq_pyxis.o irq_i8259.o irq_srm.o obj-y += err_ev6.o -obj-y += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o +obj-y += es1888.o smc37c669.o smc37c93x.o pc873xx.o gct.o obj-y += srmcons.o else @@ -63,11 +64,11 @@ obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o # Board support obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \ - ns87312.o + pc873xx.o obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o
irq_srm.o \ - ns87312.o + pc873xx.o obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \ - ns87312.o + pc873xx.o obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \ smc37c93x.o obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \ @@ -90,14 +91,14 @@ obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o -obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o -obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o -obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o -obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o -obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o +obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o +obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o +obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o +obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o +obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o pc873xx.o obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \ irq_srm.o smc37c669.o -obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o ns87312.o +obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o pc873xx.o obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o # Error support diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 7f912ba3d9ad..fe912984d9b1 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -31,6 +31,7 @@ #include <asm/uaccess.h> volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); void ack_bad_irq(unsigned int irq) { @@ -63,9 +64,7 @@ int irq_select_affinity(unsigned int irq) int show_interrupts(struct seq_file *p, void *v) { -#ifdef CONFIG_SMP int j; -#endif int irq = *(loff_t *) v; struct irqaction * action; unsigned long flags; @@ -112,6 +111,10 @@ unlock: seq_printf(p, "%10lu ", cpu_data[j].ipi_count); seq_putc(p, '\n'); #endif + seq_puts(p, "PMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, " Performance Monitoring\n"); seq_printf(p, "ERR: %10lu\n", irq_err_count); } return 0; diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index cfde865b78e0..5f77afb88e89 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -10,6 +10,7 @@ #include <asm/machvec.h> #include <asm/dma.h> +#include <asm/perf_event.h> #include "proto.h" #include "irq_impl.h" @@ -111,6 +112,8 @@ init_IRQ(void) wrent(entInt, 0); alpha_mv.init_irq(); + + init_hw_perf_events(); } /* diff --git a/arch/alpha/kernel/ns87312.c b/arch/alpha/kernel/ns87312.c deleted file mode 100644 index 342b56d24c20..000000000000 --- a/arch/alpha/kernel/ns87312.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * linux/arch/alpha/kernel/ns87312.c - */ - -#include <linux/init.h> -#include <asm/io.h> -#include "proto.h" - - -/* - * The SRM console *disables* the IDE interface, this code ensures it's - * enabled. - * - * This code bangs on a control register of the 87312 Super I/O chip - * that implements parallel port/serial ports/IDE/FDI. Depending on - * the motherboard, the Super I/O chip can be configured through a - * pair of registers that are located either at I/O ports 0x26e/0x26f - * or 0x398/0x399. 
Unfortunately, autodetecting which base address is - * in use works only once (right after a reset). The Super I/O chip - * has the additional quirk that configuration register data must be - * written twice (I believe this is a safety feature to prevent - * accidental modification---fun, isn't it?). - */ - -void __init -ns87312_enable_ide(long ide_base) -{ - int data; - unsigned long flags; - - local_irq_save(flags); - outb(0, ide_base); /* set the index register for reg #0 */ - data = inb(ide_base+1); /* read the current contents */ - outb(0, ide_base); /* set the index register for reg #0 */ - outb(data | 0x40, ide_base+1); /* turn on IDE */ - outb(data | 0x40, ide_base+1); /* turn on IDE, really! */ - local_irq_restore(flags); -} diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index de9d39717808..88131c6e42e3 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -234,11 +234,11 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st } static int -do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer, +do_osf_statfs(struct path *path, struct osf_statfs __user *buffer, unsigned long bufsiz) { struct kstatfs linux_stat; - int error = vfs_statfs(dentry, &linux_stat); + int error = vfs_statfs(path, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; @@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, char __user *, pathname, retval = user_path(pathname, &path); if (!retval) { - retval = do_osf_statfs(path.dentry, buffer, bufsiz); + retval = do_osf_statfs(&path, buffer, bufsiz); path_put(&path); } return retval; @@ -267,7 +267,7 @@ SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, retval = -EBADF; file = fget(fd); if (file) { - retval = do_osf_statfs(file->f_path.dentry, buffer, bufsiz); + retval = do_osf_statfs(&file->f_path, buffer, bufsiz); fput(file); } return retval; diff --git a/arch/alpha/kernel/pc873xx.c b/arch/alpha/kernel/pc873xx.c new file mode 100644 index 000000000000..27dcbff85613 --- /dev/null +++ b/arch/alpha/kernel/pc873xx.c @@ -0,0 +1,88 @@ +#include <linux/ioport.h> +#include <asm/io.h> + +#include "pc873xx.h" + +static unsigned pc873xx_probelist[] = {0x398, 0x26e, 0}; + +static char *pc873xx_names[] = { + "PC87303", "PC87306", "PC87312", "PC87332", "PC87334" +}; + +static unsigned int base, model; + + +unsigned int __init pc873xx_get_base() +{ + return base; +} + +char *__init pc873xx_get_model() +{ + return pc873xx_names[model]; } + +static unsigned char __init pc873xx_read(unsigned int base, int reg) +{ + outb(reg, base); + return inb(base + 1); +} + +static void __init pc873xx_write(unsigned int base, int reg, unsigned char data) +{ + unsigned long flags; + + local_irq_save(flags); + outb(reg, base); + outb(data, base + 1); + outb(data, base + 1); /* Must be written twice */ + local_irq_restore(flags); +} + +int __init pc873xx_probe(void) +{ + int val, index = 0; + + while ((base = pc873xx_probelist[index++])) { + + if (request_region(base, 2, "Super IO PC873xx") == NULL) + continue; + + val = pc873xx_read(base, REG_SID); + if ((val & 0xf0) == 0x10) { + model = PC87332; + break; + } else if ((val & 0xf8) == 0x70) { + model = PC87306; + break; + } else if ((val & 0xf8) == 0x50) { + model = PC87334; + break; + } else if ((val & 0xf8) == 0x40) { + model = PC87303; + break; + } + + release_region(base, 2); + } + + return (base == 0) ?
-1 : 1; +} + +void __init pc873xx_enable_epp19(void) +{ + unsigned char data; + + printk(KERN_INFO "PC873xx enabling EPP v1.9\n"); + data = pc873xx_read(base, REG_PCR); + pc873xx_write(base, REG_PCR, (data & 0xFC) | 0x02); +} + +void __init pc873xx_enable_ide(void) +{ + unsigned char data; + + printk(KERN_INFO "PC873xx enabling IDE interrupt\n"); + data = pc873xx_read(base, REG_FER); + pc873xx_write(base, REG_FER, data | 0x40); +} diff --git a/arch/alpha/kernel/pc873xx.h b/arch/alpha/kernel/pc873xx.h new file mode 100644 index 000000000000..25e16956fe3e --- /dev/null +++ b/arch/alpha/kernel/pc873xx.h @@ -0,0 +1,35 @@ + +#ifndef _PC873xx_H_ +#define _PC873xx_H_ + +/* + * Control Register Values + */ +#define REG_FER 0x00 +#define REG_FAR 0x01 +#define REG_PTR 0x02 +#define REG_FCR 0x03 +#define REG_PCR 0x04 +#define REG_KRR 0x05 +#define REG_PMC 0x06 +#define REG_TUP 0x07 +#define REG_SID 0x08 +#define REG_ASC 0x09 +#define REG_IRC 0x0e + +/* + * Model numbers + */ +#define PC87303 0 +#define PC87306 1 +#define PC87312 2 +#define PC87332 3 +#define PC87334 4 + +int pc873xx_probe(void); +unsigned int pc873xx_get_base(void); +char *pc873xx_get_model(void); +void pc873xx_enable_epp19(void); +void pc873xx_enable_ide(void); + +#endif diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c index d979e7c7bc4b..738fc824e2ea 100644 --- a/arch/alpha/kernel/pci-sysfs.c +++ b/arch/alpha/kernel/pci-sysfs.c @@ -60,7 +60,8 @@ static int __pci_mmap_fits(struct pci_dev *pdev, int num, * * Use the bus mapping routines to map a PCI resource into userspace. */ -static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, +static int pci_mmap_resource(struct kobject *kobj, + struct bin_attribute *attr, struct vm_area_struct *vma, int sparse) { struct pci_dev *pdev = to_pci_dev(container_of(kobj, @@ -89,14 +90,14 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); } -static int pci_mmap_resource_sparse(struct kobject *kobj, +static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { return pci_mmap_resource(kobj, attr, vma, 1); } -static int pci_mmap_resource_dense(struct kobject *kobj, +static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c new file mode 100644 index 000000000000..51c39fa41693 --- /dev/null +++ b/arch/alpha/kernel/perf_event.c @@ -0,0 +1,842 @@ +/* + * Hardware performance events for the Alpha. + * + * We implement HW counts on the EV67 and subsequent CPUs only. + * + * (C) 2010 Michael J. Cree + * + * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and + * ARM code, which are copyright by their respective authors. + */ + +#include <linux/perf_event.h> +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> + +#include <asm/hwrpb.h> +#include <asm/atomic.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pal.h> +#include <asm/wrperfmon.h> +#include <asm/hw_irq.h> + + +/* The maximum number of PMCs on any Alpha CPU whatsoever. */ +#define MAX_HWEVENTS 3 +#define PMC_NO_INDEX -1 + +/* For tracking PMCs and the hw events they monitor on each CPU. 
*/ +struct cpu_hw_events { + int enabled; + /* Number of events scheduled; also number entries valid in arrays below. */ + int n_events; + /* Number events added since last hw_perf_disable(). */ + int n_added; + /* Events currently scheduled. */ + struct perf_event *event[MAX_HWEVENTS]; + /* Event type of each scheduled event. */ + unsigned long evtype[MAX_HWEVENTS]; + /* Current index of each scheduled event; if not yet determined + * contains PMC_NO_INDEX. + */ + int current_idx[MAX_HWEVENTS]; + /* The active PMCs' config for easy use with wrperfmon(). */ + unsigned long config; + /* The active counters' indices for easy use with wrperfmon(). */ + unsigned long idx_mask; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + + + +/* + * A structure to hold the description of the PMCs available on a particular + * type of Alpha CPU. + */ +struct alpha_pmu_t { + /* Mapping of the perf system hw event types to indigenous event types */ + const int *event_map; + /* The number of entries in the event_map */ + int max_events; + /* The number of PMCs on this Alpha */ + int num_pmcs; + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask[MAX_HWEVENTS]; + /* The maximum period the PMC can count. */ + unsigned long pmc_max_period[MAX_HWEVENTS]; + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left[3]; + /* Subroutine for allocation of PMCs. Enforces constraints. */ + int (*check_constraints)(struct perf_event **, unsigned long *, int); +}; + +/* + * The Alpha CPU PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct alpha_pmu_t *alpha_pmu; + + +#define HW_OP_UNSUPPORTED -1 + +/* + * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs + * follow. Since they are identical we refer to them collectively as the + * EV67 henceforth. + */ + +/* + * EV67 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. + */ +enum ev67_pmc_event_type { + EV67_CYCLES = 1, + EV67_INSTRUCTIONS, + EV67_BCACHEMISS, + EV67_MBOXREPLAY, + EV67_LAST_ET +}; +#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES) + + +/* Mapping of the hw event types to the perf tool interface */ +static const int ev67_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS, +}; + +struct ev67_mapping_t { + int config; + int idx; +}; + +/* + * The mapping used for one event only - these must be in same order as enum + * ev67_pmc_event_type definition. + */ +static const struct ev67_mapping_t ev67_mapping[] = { + {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */ + {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */ + {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */ + {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */ +}; + + +/* + * Check that a group of events can be simultaneously scheduled on to the + * EV67 PMU. Also allocate counter indices and config. 
*/ +static int ev67_check_constraints(struct perf_event **event, + unsigned long *evtype, int n_ev) +{ + int idx0; + unsigned long config; + + idx0 = ev67_mapping[evtype[0]-1].idx; + config = ev67_mapping[evtype[0]-1].config; + if (n_ev == 1) + goto success; + + BUG_ON(n_ev != 2); + + if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) { + /* MBOX replay traps must be on PMC 1 */ + idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0; + /* Only cycles can accompany MBOX replay traps */ + if (evtype[idx0] == EV67_CYCLES) { + config = EV67_PCTR_CYCLES_MBOX; + goto success; + } + } + + if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) { + /* Bcache misses must be on PMC 1 */ + idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0; + /* Only instructions can accompany Bcache misses */ + if (evtype[idx0] == EV67_INSTRUCTIONS) { + config = EV67_PCTR_INSTR_BCACHEMISS; + goto success; + } + } + + if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) { + /* Instructions must be on PMC 0 */ + idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1; + /* By this point only cycles can accompany instructions */ + if (evtype[idx0^1] == EV67_CYCLES) { + config = EV67_PCTR_INSTR_CYCLES; + goto success; + } + } + + /* Otherwise, darn it, there is a conflict. */ + return -1; + +success: + event[0]->hw.idx = idx0; + event[0]->hw.config_base = config; + if (n_ev == 2) { + event[1]->hw.idx = idx0 ^ 1; + event[1]->hw.config_base = config; + } + return 0; +} + + +static const struct alpha_pmu_t ev67_pmu = { + .event_map = ev67_perfmon_event_map, + .max_events = ARRAY_SIZE(ev67_perfmon_event_map), + .num_pmcs = 2, + .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0}, + .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, + .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, + .pmc_left = {16, 4, 0}, + .check_constraints = ev67_check_constraints +}; + + + +/* + * Helper routines to ensure that we read/write only the correct PMC bits + * when calling the wrperfmon PALcall. + */ +static inline void alpha_write_pmc(int idx, unsigned long val) +{ + val &= alpha_pmu->pmc_count_mask[idx]; + val <<= alpha_pmu->pmc_count_shift[idx]; + val |= (1<<idx); + wrperfmon(PERFMON_CMD_WRITE, val); +} + +static inline unsigned long alpha_read_pmc(int idx) +{ + unsigned long val; + + val = wrperfmon(PERFMON_CMD_READ, 0); + val >>= alpha_pmu->pmc_count_shift[idx]; + val &= alpha_pmu->pmc_count_mask[idx]; + return val; +} + +/* Set a new period to sample over */ +static int alpha_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + long left = atomic64_read(&hwc->period_left); + long period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + /* + * Hardware restrictions require that the counters must not be + * written with values that are too close to the maximum period. */
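+ /* Worked example of the clamp below: on the EV67 the counters are 20 bits wide (pmc_max_period = 0xfffff) and pmc_left[0] is 16, so a remaining period of fewer than 16 counts is widened to 16 before being negated and written to the PMC. */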
+ if (unlikely(left < alpha_pmu->pmc_left[idx])) + left = alpha_pmu->pmc_left[idx]; + + if (left > (long)alpha_pmu->pmc_max_period[idx]) + left = alpha_pmu->pmc_max_period[idx]; + + atomic64_set(&hwc->prev_count, (unsigned long)(-left)); + + alpha_write_pmc(idx, (unsigned long)(-left)); + + perf_event_update_userpage(event); + + return ret; +} + + +/* + * Calculates the count (the 'delta') since the last time the PMC was read. + * + * As the PMCs' full period can easily be exceeded within the perf system + * sampling period we cannot use any high order bits as a guard bit in the + * PMCs to detect overflow as is done by other architectures. The code here + * calculates the delta on the basis that there is no overflow when ovf is + * zero. The value passed via ovf by the interrupt handler corrects for + * overflow. + * + * This can be racy on rare occasions -- a call to this routine can occur + * with an overflowed counter just before the PMI service routine is called. + * The check for delta negative hopefully always rectifies this situation. + */ +static unsigned long alpha_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = alpha_read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. + */ + if (unlikely(delta < 0)) { + delta += alpha_pmu->pmc_max_period[idx] + 1; + } + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + + +/* + * Collect all HW events into the array event[]. + */ +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *event[], unsigned long *evtype, + int *current_idx) +{ + struct perf_event *pe; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + event[n] = group; + evtype[n] = group->hw.event_base; + current_idx[n++] = PMC_NO_INDEX; + } + list_for_each_entry(pe, &group->sibling_list, group_entry) { + if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + event[n] = pe; + evtype[n] = pe->hw.event_base; + current_idx[n++] = PMC_NO_INDEX; + } + } + return n; +} + + + +/* + * Check that a group of events can be simultaneously scheduled on to the PMU. + */ +static int alpha_check_constraints(struct perf_event **events, + unsigned long *evtypes, int n_ev) +{ + + /* No HW events is possible from hw_perf_group_sched_in(). */ + if (n_ev == 0) + return 0; + + if (n_ev > alpha_pmu->num_pmcs) + return -1; + + return alpha_pmu->check_constraints(events, evtypes, n_ev); +} +
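The update path above counts upward from a negative seed: alpha_perf_event_set_period() programs the PMC with -left so it overflows after the requested number of events, and alpha_perf_event_update() folds a possible wrap back into a positive delta (the ovf argument from the PMI handler covers an overflow that has already been seen). A standalone model of that wrap correction for a 20-bit counter (helper name and test values are illustrative only):

#include <assert.h>

#define PMC_MASK 0xfffffUL /* 20-bit counter, as on the EV67 */

/* Delta between two raw counter reads, correcting for a single wrap:
   the same fixup alpha_perf_event_update() applies when delta < 0. */
static long pmc_delta(unsigned long prev, unsigned long now)
{
        long delta = (long)(now - (prev & PMC_MASK));
        if (delta < 0)  /* counter wrapped between the two reads */
                delta += PMC_MASK + 1;
        return delta;
}

int main(void)
{
        assert(pmc_delta(0xffff0UL, 0x00010UL) == 0x20); /* wrapped */
        assert(pmc_delta(0x00100UL, 0x00180UL) == 0x80); /* no wrap */
        return 0;
}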
+/* + * If new events have been scheduled then update cpuc with the new + * configuration. This may involve shifting cycle counts from one PMC to + * another. */ +static void maybe_change_configuration(struct cpu_hw_events *cpuc) +{ + int j; + + if (cpuc->n_added == 0) + return; + + /* Find counters that are moving to another PMC and update */ + for (j = 0; j < cpuc->n_events; j++) { + struct perf_event *pe = cpuc->event[j]; + + if (cpuc->current_idx[j] != PMC_NO_INDEX && + cpuc->current_idx[j] != pe->hw.idx) { + alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); + cpuc->current_idx[j] = PMC_NO_INDEX; + } + } + + /* Assign to counters all unassigned events. */ + cpuc->idx_mask = 0; + for (j = 0; j < cpuc->n_events; j++) { + struct perf_event *pe = cpuc->event[j]; + struct hw_perf_event *hwc = &pe->hw; + int idx = hwc->idx; + + if (cpuc->current_idx[j] != PMC_NO_INDEX) { + cpuc->idx_mask |= (1<<cpuc->current_idx[j]); + continue; + } + + alpha_perf_event_set_period(pe, hwc, idx); + cpuc->current_idx[j] = idx; + cpuc->idx_mask |= (1<<cpuc->current_idx[j]); + } + cpuc->config = cpuc->event[0]->hw.config_base; +} + + + +/* Schedule perf HW event on to PMU. + * - this function is called from outside this module via the pmu struct + * returned from perf event initialisation. + */ +static int alpha_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int n0; + int ret; + unsigned long flags; + + /* + * The Sparc code has the IRQ disable first followed by the perf + * disable, however this can lead to an overflowed counter with the + * PMI disabled on rare occasions. The alpha_perf_event_update() + * routine should detect this situation by noting a negative delta, + * nevertheless we disable the PMCs first to enable a potential + * final PMI to occur before we disable interrupts. + */ + perf_disable(); + local_irq_save(flags); + + /* Default to error to be returned */ + ret = -EAGAIN; + + /* Insert event on to PMU and if successful modify ret to valid return */ + n0 = cpuc->n_events; + if (n0 < alpha_pmu->num_pmcs) { + cpuc->event[n0] = event; + cpuc->evtype[n0] = event->hw.event_base; + cpuc->current_idx[n0] = PMC_NO_INDEX; + + if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { + cpuc->n_events++; + cpuc->n_added++; + ret = 0; + } + } + + local_irq_restore(flags); + perf_enable(); + + return ret; +} + + + +/* Disable performance monitoring unit + * - this function is called from outside this module via the pmu struct + * returned from perf event initialisation. + */ +static void alpha_pmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + int j; + + perf_disable(); + local_irq_save(flags); + + for (j = 0; j < cpuc->n_events; j++) { + if (event == cpuc->event[j]) { + int idx = cpuc->current_idx[j]; + + /* Shift remaining entries down into the existing + * slot. + */ + while (++j < cpuc->n_events) { + cpuc->event[j - 1] = cpuc->event[j]; + cpuc->evtype[j - 1] = cpuc->evtype[j]; + cpuc->current_idx[j - 1] = + cpuc->current_idx[j]; + } + + /* Absorb the final count and turn off the event.
*/ + alpha_perf_event_update(event, hwc, idx, 0); + perf_event_update_userpage(event); + + cpuc->idx_mask &= ~(1UL<<idx); + cpuc->n_events--; + break; + } + } + + local_irq_restore(flags); + perf_enable(); +} + + +static void alpha_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + alpha_perf_event_update(event, hwc, hwc->idx, 0); +} + + +static void alpha_pmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + cpuc->idx_mask |= 1UL<<hwc->idx; + wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); +} + + +/* + * Check that CPU performance counters are supported. + * - currently support EV67 and later CPUs. + * - actually some later revisions of the EV6 have the same PMC model as the + * EV67 but we don't do sufficiently deep CPU detection to detect them. + * Bad luck to the very few people who might have one, I guess. + */ +static int supported_cpu(void) +{ + struct percpu_struct *cpu; + unsigned long cputype; + + /* Get cpu type from HW */ + cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); + cputype = cpu->type & 0xffffffff; + /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */ + return (cputype >= EV67_CPU) && (cputype <= EV69_CPU); +} + + + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! */ + return; +} + + + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + struct perf_event *evts[MAX_HWEVENTS]; + unsigned long evtypes[MAX_HWEVENTS]; + int idx_rubbish_bin[MAX_HWEVENTS]; + int ev; + int n; + + /* We only support a limited range of HARDWARE event types, with only + * one programmable via a RAW event type. + */ + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= alpha_pmu->max_events) + return -EINVAL; + ev = alpha_pmu->event_map[attr->config]; + } else if (attr->type == PERF_TYPE_HW_CACHE) { + return -EOPNOTSUPP; + } else if (attr->type == PERF_TYPE_RAW) { + ev = attr->config & 0xff; + } else { + return -EOPNOTSUPP; + } + + if (ev < 0) { + return ev; + } + + /* The EV67 does not support mode exclusion */ + if (attr->exclude_kernel || attr->exclude_user + || attr->exclude_hv || attr->exclude_idle) { + return -EPERM; + } + + /* + * We place the event type in event_base here and leave calculation + * of the codes to programme the PMU for alpha_pmu_enable() because + * it is only then we will know what HW events are actually + * scheduled on to the PMU. At that point the code to programme the + * PMU is put into config_base and the PMC to use is placed into + * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that + * it is yet to be determined. + */ + hwc->event_base = ev; + + /* Collect events in a group together suitable for calling + * alpha_check_constraints() to verify that the group as a whole can + * be scheduled on to the PMU. + */ + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + alpha_pmu->num_pmcs - 1, + evts, evtypes, idx_rubbish_bin); + if (n < 0) + return -EINVAL; + } + evtypes[n] = hwc->event_base; + evts[n] = event; + + if (alpha_check_constraints(evts, evtypes, n + 1)) + return -EINVAL; + + /* Indicate that PMU config and idx are yet to be determined.
*/ + hwc->config_base = 0; + hwc->idx = PMC_NO_INDEX; + + event->destroy = hw_perf_event_destroy; + + /* + * Most architectures reserve the PMU for their use at this point. + * As there is no existing mechanism to arbitrate usage and there + * appears to be no other user of the Alpha PMU we just assume + * that we can just use it, hence a NO-OP here. + * + * Maybe an alpha_reserve_pmu() routine should be implemented but is + * anything else ever going to use it? + */ + + if (!hwc->sample_period) { + hwc->sample_period = alpha_pmu->pmc_max_period[0]; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +static const struct pmu pmu = { + .enable = alpha_pmu_enable, + .disable = alpha_pmu_disable, + .read = alpha_pmu_read, + .unthrottle = alpha_pmu_unthrottle, +}; + + +/* + * Main entry point to initialise a HW performance event. + */ +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + int err; + + if (!alpha_pmu) + return ERR_PTR(-ENODEV); + + /* Do the real initialisation work. */ + err = __hw_perf_event_init(event); + + if (err) + return ERR_PTR(err); + + return &pmu; +} + + + +/* + * Main entry point - enable HW performance counters. + */ +void hw_perf_enable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + if (cpuc->n_events > 0) { + /* Update cpuc with information from any new scheduled events. */ + maybe_change_configuration(cpuc); + + /* Start counting the desired events. */ + wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE); + wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + } +} + + +/* + * Main entry point - disable HW performance counters. + */ + +void hw_perf_disable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + cpuc->n_added = 0; + + wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); +} + + +/* + * Main entry point - don't know when this is called but it + * obviously dumps debug info. + */ +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned long pcr; + int pcr0, pcr1; + int cpu; + + if (!supported_cpu()) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr = wrperfmon(PERFMON_CMD_READ, 0); + pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0]; + pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1]; + + pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1); + + local_irq_restore(flags); +} + + +/* + * Performance Monitoring Interrupt Service Routine called when a PMC + * overflows. The PMC that overflowed is passed in la_ptr. + */ +static void alpha_perf_event_irq_handler(unsigned long la_ptr, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + struct perf_sample_data data; + struct perf_event *event; + struct hw_perf_event *hwc; + int idx, j; + + __get_cpu_var(irq_pmi_count)++; + cpuc = &__get_cpu_var(cpu_hw_events); + + /* Completely counting through the PMC's period to trigger a new PMC + * overflow interrupt while in this interrupt routine is utterly + * disastrous! The EV6 and EV67 counters are sufficiently large to + * prevent this but to be really sure disable the PMCs. + */ + wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); + + /* la_ptr is the counter that overflowed. 
*/ + if (unlikely(la_ptr >= perf_max_events)) { + /* This should never occur! */ + irq_err_count++; + pr_warning("PMI: silly index %ld\n", la_ptr); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + idx = la_ptr; + + perf_sample_data_init(&data, 0); + for (j = 0; j < cpuc->n_events; j++) { + if (cpuc->current_idx[j] == idx) + break; + } + + if (unlikely(j == cpuc->n_events)) { + /* This can occur if the event is disabled right on a PMC overflow. */ + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + event = cpuc->event[j]; + + if (unlikely(!event)) { + /* This should never occur! */ + irq_err_count++; + pr_warning("PMI: No event at index %d!\n", idx); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + return; + } + + hwc = &event->hw; + alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); + data.period = event->hw.last_period; + + if (alpha_perf_event_set_period(event, hwc, idx)) { + if (perf_event_overflow(event, 1, &data, regs)) { + /* Interrupts coming too quickly; "throttle" the + * counter, i.e., disable it for a little while. + */ + cpuc->idx_mask &= ~(1UL<<idx); + } + } + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); + + return; +} + + + +/* + * Init call to initialise performance events at kernel startup. + */ +void __init init_hw_perf_events(void) +{ + pr_info("Performance events: "); + + if (!supported_cpu()) { + pr_cont("No support for your CPU.\n"); + return; + } + + pr_cont("Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = alpha_perf_event_irq_handler; + + /* And set up PMU specification */ + alpha_pmu = &ev67_pmu; + perf_max_events = alpha_pmu->num_pmcs; +} + diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index d4327e461c22..85b4aea01ef8 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -34,6 +34,7 @@ #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" +#include "pc873xx.h" #if defined(ALPHA_RESTORE_SRM_SETUP) /* Save LCA configuration data as the console had it set up. */ @@ -208,7 +209,27 @@ noname_init_pci(void) common_init_pci(); sio_pci_route(); sio_fixup_irq_levels(sio_collect_irq_levels()); - ns87312_enable_ide(0x26e); + + if (pc873xx_probe() == -1) { + printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); + } else { + printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", + pc873xx_get_model(), pc873xx_get_base()); + + /* Enabling things in the Super IO chip doesn't actually + * configure and enable things, the legacy drivers still + * need to do the actual configuration and enabling. + * This only unblocks them. + */ + +#if !defined(CONFIG_ALPHA_AVANTI) + /* Don't bother on the Avanti family. + * None of them had on-board IDE. 
+ */ + pc873xx_enable_ide(); +#endif + pc873xx_enable_epp19(); + } } static inline void __init diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 5d0826654c61..eacceb26d9c8 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -41,6 +41,7 @@ #include <linux/init.h> #include <linux/bcd.h> #include <linux/profile.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -51,6 +52,7 @@ #include <linux/mc146818rtc.h> #include <linux/time.h> #include <linux/timex.h> +#include <linux/clocksource.h> #include "proto.h" #include "irq_impl.h" @@ -75,14 +77,32 @@ static struct { __u32 last_time; /* ticks/cycle * 2^48 */ unsigned long scaled_ticks_per_cycle; - /* last time the CMOS clock got updated */ - time_t last_rtc_update; /* partial unused tick */ unsigned long partial_tick; } state; unsigned long est_cycle_freq; +#ifdef CONFIG_PERF_EVENTS + +DEFINE_PER_CPU(u8, perf_event_pending); + +#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 +#define test_perf_event_pending() __get_cpu_var(perf_event_pending) +#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 + +void set_perf_event_pending(void) +{ + set_perf_event_pending_flag(); +} + +#else /* CONFIG_PERF_EVENTS */ + +#define test_perf_event_pending() 0 +#define clear_perf_event_pending() + +#endif /* CONFIG_PERF_EVENTS */ + static inline __u32 rpcc(void) { @@ -91,6 +111,52 @@ static inline __u32 rpcc(void) return result; } +int update_persistent_clock(struct timespec now) +{ + return set_rtc_mmss(now.tv_sec); +} + +void read_persistent_clock(struct timespec *ts) +{ + unsigned int year, mon, day, hour, min, sec, epoch; + + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + sec = bcd2bin(sec); + min = bcd2bin(min); + hour = bcd2bin(hour); + day = bcd2bin(day); + mon = bcd2bin(mon); + year = bcd2bin(year); + } + + /* PC-like is standard; used for year >= 70 */ + epoch = 1900; + if (year < 20) + epoch = 2000; + else if (year >= 20 && year < 48) + /* NT epoch */ + epoch = 1980; + else if (year >= 48 && year < 70) + /* Digital UNIX epoch */ + epoch = 1952; + + printk(KERN_INFO "Using epoch = %d\n", epoch); + + if ((year += epoch) < 1970) + year += 100; + + ts->tv_sec = mktime(year, mon, day, hour, min, sec); +} + + + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick @@ -123,19 +189,6 @@ irqreturn_t timer_interrupt(int irq, void *dev) if (nticks) do_timer(nticks); - /* - * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be - * called as close as possible to 500 ms before the new second starts. - */ - if (ntp_synced() - && xtime.tv_sec > state.last_rtc_update + 660 - && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2 - && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) { - int tmp = set_rtc_mmss(xtime.tv_sec); - state.last_rtc_update = xtime.tv_sec - (tmp ? 
600 : 0); - } - write_sequnlock(&xtime_lock); #ifndef CONFIG_SMP @@ -143,6 +196,11 @@ irqreturn_t timer_interrupt(int irq, void *dev) update_process_times(user_mode(get_irq_regs())); #endif + if (test_perf_event_pending()) { + clear_perf_event_pending(); + perf_event_do_pending(); + } + return IRQ_HANDLED; } @@ -301,10 +359,38 @@ rpcc_after_update_in_progress(void) return rpcc(); } +#ifndef CONFIG_SMP +/* Until and unless we figure out how to get cpu cycle counters + in sync and keep them there, we can't use the rpcc. */ +static cycle_t read_rpcc(struct clocksource *cs) +{ + cycle_t ret = (cycle_t)rpcc(); + return ret; +} + +static struct clocksource clocksource_rpcc = { + .name = "rpcc", + .rating = 300, + .read = read_rpcc, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS +}; + +static inline void register_rpcc_clocksource(long cycle_freq) +{ + clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); + clocksource_register(&clocksource_rpcc); +} +#else /* !CONFIG_SMP */ +static inline void register_rpcc_clocksource(long cycle_freq) +{ +} +#endif /* !CONFIG_SMP */ + void __init time_init(void) { - unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch; + unsigned int cc1, cc2; unsigned long cycle_freq, tolerance; long diff; @@ -348,53 +434,17 @@ time_init(void) bogomips yet, but this is close on a 500MHz box. */ __delay(1000000); - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bcd2bin(sec); - min = bcd2bin(min); - hour = bcd2bin(hour); - day = bcd2bin(day); - mon = bcd2bin(mon); - year = bcd2bin(year); - } - - /* PC-like is standard; used for year >= 70 */ - epoch = 1900; - if (year < 20) - epoch = 2000; - else if (year >= 20 && year < 48) - /* NT epoch */ - epoch = 1980; - else if (year >= 48 && year < 70) - /* Digital UNIX epoch */ - epoch = 1952; - - printk(KERN_INFO "Using epoch = %d\n", epoch); - - if ((year += epoch) < 1970) - year += 100; - - xtime.tv_sec = mktime(year, mon, day, hour, min, sec); - xtime.tv_nsec = 0; - - wall_to_monotonic.tv_sec -= xtime.tv_sec; - wall_to_monotonic.tv_nsec = 0; if (HZ > (1<<16)) { extern void __you_loose (void); __you_loose(); } + register_rpcc_clocksource(cycle_freq); + state.last_time = cc1; state.scaled_ticks_per_cycle = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; - state.last_rtc_update = 0; state.partial_tick = 0L; /* Startup the timer source. */ @@ -402,44 +452,6 @@ time_init(void) } /* - * Use the cycle counter to estimate a displacement from the last time - * tick. Unfortunately the Alpha designers made only the low 32-bits of - * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz - * part. So we can't do the "find absolute time in terms of cycles" thing - * that the other ports do. - */ -u32 arch_gettimeoffset(void) -{ -#ifdef CONFIG_SMP - /* Until and unless we figure out how to get cpu cycle counters - in sync and keep them there, we can't use the rpcc tricks. */ - return 0; -#else - unsigned long delta_cycles, delta_usec, partial_tick; - delta_cycles = rpcc() - state.last_time; - partial_tick = state.partial_tick; - /* - * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks) - * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks) - * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks) - * - * which, given a 600MHz cycle and a 1024Hz tick, has a - * dynamic range of about 1.7e17, which is less than the - * 1.8e19 in an unsigned long, so we are safe from overflow. - * - * Round, but with .5 up always, since .5 to even is harder - * with no clear gain. - */ - - delta_usec = (delta_cycles * state.scaled_ticks_per_cycle - + partial_tick) * 15625; - delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; - return delta_usec * 1000; -#endif -} -
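With the rpcc clocksource registered above, the generic timekeeping core takes over the job of the fixed-point code removed here: clocksource_calc_mult_shift() derives a (mult, shift) pair from the cycle frequency, and nanoseconds are then computed as (cycles * mult) >> shift. A toy illustration of that conversion with a hand-picked pair (the numbers are made up for the example, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Convert a cycle-counter delta to nanoseconds the way the clocksource
   core consumes a (mult, shift) pair. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* A 500 MHz counter ticks every 2 ns, so pick mult = 2 << shift. */
        uint32_t shift = 22;
        uint32_t mult = 2u << shift;
        /* One second's worth of cycles converts to 1000000000 ns. */
        printf("%llu ns\n", (unsigned long long)cyc2ns(500000000ULL, mult, shift));
        return 0;
}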
-/* * In order to set the CMOS clock precisely, set_rtc_mmss has to be * called 500 ms after the second nowtime has started, because when * nowtime is written into the registers of the CMOS clock, it will diff --git a/arch/alpha/mm/fault.c index 00a31deaa96e..fadd5f882ff9 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -142,7 +142,6 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto bad_area; } - survive: /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ @@ -188,16 +187,10 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: - if (is_global_init(current)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk(KERN_ALERT "VM: killing process %s(%d)\n", - current->comm, task_pid_nr(current)); if (!user_mode(regs)) goto no_context; - do_group_exit(SIGKILL); + pagefault_out_of_memory(); + return; do_sigbus: /* Send a sigbus, regardless of whether we were in kernel