Diffstat:
-rw-r--r--Documentation/DocBook/kernel-api.tmpl54
-rw-r--r--Documentation/fb/gxfb.txt52
-rw-r--r--Documentation/fb/intelfb.txt2
-rw-r--r--Documentation/fb/lxfb.txt52
-rw-r--r--Documentation/fb/metronomefb.txt16
-rw-r--r--Documentation/fb/modedb.txt4
-rw-r--r--Documentation/feature-removal-schedule.txt9
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/tmpfs.txt12
-rw-r--r--Documentation/filesystems/vfat.txt15
-rw-r--r--Documentation/gpio.txt10
-rw-r--r--Documentation/ia64/kvm.txt82
-rw-r--r--Documentation/ide/ide-tape.txt211
-rw-r--r--Documentation/ide/ide.txt107
-rw-r--r--Documentation/ioctl-number.txt2
-rw-r--r--Documentation/kernel-parameters.txt4
-rw-r--r--Documentation/kprobes.txt51
-rw-r--r--Documentation/md.txt6
-rw-r--r--Documentation/powerpc/booting-without-of.txt33
-rw-r--r--Documentation/powerpc/kvm_440.txt41
-rw-r--r--Documentation/s390/kvm.txt125
-rw-r--r--Documentation/spi/spidev168
-rw-r--r--Documentation/spi/spidev_fdx.c158
-rw-r--r--Documentation/vm/numa_memory_policy.txt281
-rw-r--r--Kbuild62
-rw-r--r--MAINTAINERS17
-rw-r--r--arch/alpha/kernel/core_marvel.c6
-rw-r--r--arch/alpha/kernel/core_t2.c24
-rw-r--r--arch/alpha/kernel/core_titan.c34
-rw-r--r--arch/alpha/kernel/core_tsunami.c28
-rw-r--r--arch/alpha/kernel/module.c6
-rw-r--r--arch/alpha/kernel/pci.c2
-rw-r--r--arch/alpha/kernel/pci_iommu.c34
-rw-r--r--arch/alpha/kernel/smp.c4
-rw-r--r--arch/alpha/kernel/srm_env.c2
-rw-r--r--arch/alpha/kernel/sys_alcor.c2
-rw-r--r--arch/alpha/kernel/sys_marvel.c12
-rw-r--r--arch/alpha/kernel/sys_sable.c6
-rw-r--r--arch/alpha/kernel/sys_sio.c2
-rw-r--r--arch/alpha/kernel/traps.c5
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c11
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c14
-rw-r--r--arch/avr32/kernel/setup.c2
-rw-r--r--arch/cris/mm/init.c1
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/Makefile1
-rw-r--r--arch/ia64/kvm/Kconfig49
-rw-r--r--arch/ia64/kvm/Makefile61
-rw-r--r--arch/ia64/kvm/asm-offsets.c251
-rw-r--r--arch/ia64/kvm/kvm-ia64.c1806
-rw-r--r--arch/ia64/kvm/kvm_fw.c500
-rw-r--r--arch/ia64/kvm/kvm_minstate.h273
-rw-r--r--arch/ia64/kvm/lapic.h25
-rw-r--r--arch/ia64/kvm/misc.h93
-rw-r--r--arch/ia64/kvm/mmio.c341
-rw-r--r--arch/ia64/kvm/optvfault.S918
-rw-r--r--arch/ia64/kvm/process.c970
-rw-r--r--arch/ia64/kvm/trampoline.S1038
-rw-r--r--arch/ia64/kvm/vcpu.c2163
-rw-r--r--arch/ia64/kvm/vcpu.h740
-rw-r--r--arch/ia64/kvm/vmm.c66
-rw-r--r--arch/ia64/kvm/vmm_ivt.S1424
-rw-r--r--arch/ia64/kvm/vti.h290
-rw-r--r--arch/ia64/kvm/vtlb.c636
-rw-r--r--arch/ia64/mm/init.c9
-rw-r--r--arch/m68k/kernel/ints.c10
-rw-r--r--arch/m68k/mac/oss.c4
-rw-r--r--arch/m68k/mm/init.c1
-rw-r--r--arch/m68k/q40/q40ints.c2
-rw-r--r--arch/mips/kernel/asm-offsets.c404
-rw-r--r--arch/mips/vr41xx/common/init.c4
-rw-r--r--arch/mips/vr41xx/common/siu.c36
-rw-r--r--arch/parisc/mm/init.c11
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Kconfig.debug3
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/dts/mpc8610_hpcd.dts12
-rw-r--r--arch/powerpc/kernel/asm-offsets.c28
-rw-r--r--arch/powerpc/kvm/44x_tlb.c224
-rw-r--r--arch/powerpc/kvm/44x_tlb.h91
-rw-r--r--arch/powerpc/kvm/Kconfig42
-rw-r--r--arch/powerpc/kvm/Makefile15
-rw-r--r--arch/powerpc/kvm/booke_guest.c615
-rw-r--r--arch/powerpc/kvm/booke_host.c83
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S436
-rw-r--r--arch/powerpc/kvm/emulate.c760
-rw-r--r--arch/powerpc/kvm/powerpc.c436
-rw-r--r--arch/powerpc/mm/mem.c9
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c190
-rw-r--r--arch/powerpc/sysdev/axonram.c5
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c41
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h23
-rw-r--r--arch/s390/Kconfig14
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/vtime.c1
-rw-r--r--arch/s390/kvm/Kconfig46
-rw-r--r--arch/s390/kvm/Makefile14
-rw-r--r--arch/s390/kvm/diag.c67
-rw-r--r--arch/s390/kvm/gaccess.h274
-rw-r--r--arch/s390/kvm/intercept.c216
-rw-r--r--arch/s390/kvm/interrupt.c592
-rw-r--r--arch/s390/kvm/kvm-s390.c685
-rw-r--r--arch/s390/kvm/kvm-s390.h64
-rw-r--r--arch/s390/kvm/priv.c323
-rw-r--r--arch/s390/kvm/sie64a.S47
-rw-r--r--arch/s390/kvm/sigp.c288
-rw-r--r--arch/s390/mm/pgtable.c65
-rw-r--r--arch/sh/mm/init.c9
-rw-r--r--arch/sparc64/mm/init.c16
-rw-r--r--arch/um/drivers/chan_kern.c15
-rw-r--r--arch/um/drivers/line.c2
-rw-r--r--arch/um/drivers/mcast_kern.c2
-rw-r--r--arch/um/drivers/mconsole_user.c2
-rw-r--r--arch/um/drivers/net_kern.c6
-rw-r--r--arch/um/drivers/port_user.c2
-rw-r--r--arch/um/drivers/slip_kern.c4
-rw-r--r--arch/um/drivers/stdio_console.c4
-rw-r--r--arch/um/drivers/ubd_kern.c385
-rw-r--r--arch/um/include/chan_kern.h2
-rw-r--r--arch/um/kernel/um_arch.c7
-rw-r--r--arch/um/os-Linux/start_up.c14
-rw-r--r--arch/um/os-Linux/sys-i386/task_size.c12
-rw-r--r--arch/x86/Kconfig34
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/apm_32.c3
-rw-r--r--arch/x86/kernel/crash.c3
-rw-r--r--arch/x86/kernel/kvm.c248
-rw-r--r--arch/x86/kernel/kvmclock.c187
-rw-r--r--arch/x86/kernel/mfgpt_32.c8
-rw-r--r--arch/x86/kernel/process.c117
-rw-r--r--arch/x86/kernel/process_32.c118
-rw-r--r--arch/x86/kernel/process_64.c123
-rw-r--r--arch/x86/kernel/reboot.c13
-rw-r--r--arch/x86/kernel/setup_32.c6
-rw-r--r--arch/x86/kernel/setup_64.c7
-rw-r--r--arch/x86/kvm/Kconfig13
-rw-r--r--arch/x86/kvm/Makefile6
-rw-r--r--arch/x86/kvm/i8254.c611
-rw-r--r--arch/x86/kvm/i8254.h63
-rw-r--r--arch/x86/kvm/irq.c18
-rw-r--r--arch/x86/kvm/irq.h3
-rw-r--r--arch/x86/kvm/kvm_svm.h2
-rw-r--r--arch/x86/kvm/lapic.c35
-rw-r--r--arch/x86/kvm/mmu.c672
-rw-r--r--arch/x86/kvm/mmu.h6
-rw-r--r--arch/x86/kvm/paging_tmpl.h86
-rw-r--r--arch/x86/kvm/segment_descriptor.h29
-rw-r--r--arch/x86/kvm/svm.c352
-rw-r--r--arch/x86/kvm/svm.h3
-rw-r--r--arch/x86/kvm/tss.h59
-rw-r--r--arch/x86/kvm/vmx.c278
-rw-r--r--arch/x86/kvm/vmx.h10
-rw-r--r--arch/x86/kvm/x86.c897
-rw-r--r--arch/x86/kvm/x86_emulate.c285
-rw-r--r--arch/x86/mm/init_32.c36
-rw-r--r--arch/x86/mm/init_64.c9
-rw-r--r--arch/x86/mm/ioremap.c15
-rw-r--r--arch/x86/mm/pat.c9
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--block/bsg.c43
-rw-r--r--drivers/acpi/processor_idle.c19
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/pcmcia/synclink_cs.c125
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/synclink.c258
-rw-r--r--drivers/char/synclink_gt.c102
-rw-r--r--drivers/char/synclinkmp.c265
-rw-r--r--drivers/char/sysrq.c3
-rw-r--r--drivers/char/vt.c8
-rw-r--r--drivers/gpio/gpiolib.c123
-rw-r--r--drivers/gpio/mcp23s08.c1
-rw-r--r--drivers/gpio/pca953x.c1
-rw-r--r--drivers/gpio/pcf857x.c1
-rw-r--r--drivers/ide/arm/bast-ide.c12
-rw-r--r--drivers/ide/arm/icside.c13
-rw-r--r--drivers/ide/arm/palm_bk3710.c12
-rw-r--r--drivers/ide/arm/rapide.c8
-rw-r--r--drivers/ide/cris/ide-cris.c18
-rw-r--r--drivers/ide/h8300/ide-h8300.c6
-rw-r--r--drivers/ide/ide-acpi.c18
-rw-r--r--drivers/ide/ide-cd.c12
-rw-r--r--drivers/ide/ide-floppy.c12
-rw-r--r--drivers/ide/ide-io.c42
-rw-r--r--drivers/ide/ide-iops.c72
-rw-r--r--drivers/ide/ide-pnp.c5
-rw-r--r--drivers/ide/ide-probe.c73
-rw-r--r--drivers/ide/ide-proc.c8
-rw-r--r--drivers/ide/ide-tape.c1191
-rw-r--r--drivers/ide/ide-taskfile.c29
-rw-r--r--drivers/ide/ide.c360
-rw-r--r--drivers/ide/legacy/ali14xx.c6
-rw-r--r--drivers/ide/legacy/buddha.c14
-rw-r--r--drivers/ide/legacy/dtc2278.c5
-rw-r--r--drivers/ide/legacy/falconide.c6
-rw-r--r--drivers/ide/legacy/gayle.c18
-rw-r--r--drivers/ide/legacy/ht6560b.c11
-rw-r--r--drivers/ide/legacy/ide-4drives.c2
-rw-r--r--drivers/ide/legacy/ide-cs.c12
-rw-r--r--drivers/ide/legacy/ide_platform.c10
-rw-r--r--drivers/ide/legacy/macide.c4
-rw-r--r--drivers/ide/legacy/q40ide.c6
-rw-r--r--drivers/ide/legacy/qd65xx.c33
-rw-r--r--drivers/ide/legacy/umc8672.c4
-rw-r--r--drivers/ide/mips/au1xxx-ide.c7
-rw-r--r--drivers/ide/mips/swarm.c6
-rw-r--r--drivers/ide/pci/aec62xx.c2
-rw-r--r--drivers/ide/pci/alim15x3.c242
-rw-r--r--drivers/ide/pci/amd74xx.c2
-rw-r--r--drivers/ide/pci/cmd640.c89
-rw-r--r--drivers/ide/pci/cmd64x.c6
-rw-r--r--drivers/ide/pci/cy82c693.c4
-rw-r--r--drivers/ide/pci/delkin_cb.c9
-rw-r--r--drivers/ide/pci/hpt366.c2
-rw-r--r--drivers/ide/pci/ns87415.c12
-rw-r--r--drivers/ide/pci/opti621.c7
-rw-r--r--drivers/ide/pci/scc_pata.c11
-rw-r--r--drivers/ide/pci/sgiioc4.c27
-rw-r--r--drivers/ide/pci/siimage.c27
-rw-r--r--drivers/ide/pci/trm290.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c2
-rw-r--r--drivers/ide/ppc/mpc8xx.c28
-rw-r--r--drivers/ide/ppc/pmac.c7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c75
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c16
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c15
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c51
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c23
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c122
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h33
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c6
-rw-r--r--drivers/infiniband/hw/nes/nes.c15
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c20
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c18
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c125
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/input/joystick/xpad.c34
-rw-r--r--drivers/isdn/capi/capi.c34
-rw-r--r--drivers/isdn/capi/capidrv.c28
-rw-r--r--drivers/isdn/capi/capifs.c5
-rw-r--r--drivers/isdn/capi/capilib.c4
-rw-r--r--drivers/isdn/capi/capiutil.c2
-rw-r--r--drivers/isdn/capi/kcapi.c22
-rw-r--r--drivers/isdn/capi/kcapi.h2
-rw-r--r--drivers/isdn/hardware/avm/b1.c10
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c10
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pcmcia.c4
-rw-r--r--drivers/isdn/hardware/avm/c4.c12
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c4
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/message.c12
-rw-r--r--drivers/isdn/hisax/asuscom.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c2
-rw-r--r--drivers/isdn/hisax/diva.c2
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c6
-rw-r--r--drivers/isdn/hisax/hfcscard.c2
-rw-r--r--drivers/isdn/hisax/hisax_debug.h6
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c12
-rw-r--r--drivers/isdn/hisax/ix1_micro.c2
-rw-r--r--drivers/isdn/hisax/niccy.c2
-rw-r--r--drivers/isdn/hisax/sedlbauer.c2
-rw-r--r--drivers/isdn/hisax/st5481.h10
-rw-r--r--drivers/isdn/hisax/st5481_usb.c2
-rw-r--r--drivers/isdn/hisax/teles3.c2
-rw-r--r--drivers/isdn/i4l/isdn_common.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.h6
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c32
-rw-r--r--drivers/isdn/i4l/isdn_tty.c6
-rw-r--r--drivers/md/dm-uevent.c22
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/md/raid5.c158
-rw-r--r--drivers/md/raid6algos.c3
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/mfd/sm501.c84
-rw-r--r--drivers/mfd/ucb1x00-ts.c7
-rw-r--r--drivers/misc/enclosure.c100
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/mlx4/alloc.c157
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/main.c3
-rw-r--r--drivers/net/mlx4/mlx4.h3
-rw-r--r--drivers/net/mlx4/qp.c31
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig6
-rw-r--r--drivers/oprofile/buffer_sync.c2
-rw-r--r--drivers/oprofile/cpu_buffer.c16
-rw-r--r--drivers/oprofile/cpu_buffer.h3
-rw-r--r--drivers/oprofile/oprofile_stats.c4
-rw-r--r--drivers/pnp/driver.c4
-rw-r--r--drivers/pnp/quirks.c15
-rw-r--r--drivers/rtc/Kconfig5
-rw-r--r--drivers/rtc/rtc-at91rm9200.c12
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c6
-rw-r--r--drivers/rtc/rtc-ds1672.c14
-rw-r--r--drivers/rtc/rtc-isl1208.c357
-rw-r--r--drivers/rtc/rtc-max6900.c6
-rw-r--r--drivers/rtc/rtc-max6902.c4
-rw-r--r--drivers/rtc/rtc-pcf8563.c126
-rw-r--r--drivers/rtc/rtc-pcf8583.c2
-rw-r--r--drivers/rtc/rtc-rs5c313.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c18
-rw-r--r--drivers/rtc/rtc-s3c.c6
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-sysfs.c12
-rw-r--r--drivers/rtc/rtc-test.c8
-rw-r--r--drivers/rtc/rtc-v3020.c4
-rw-r--r--drivers/rtc/rtc-x1205.c170
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dcssblk.c8
-rw-r--r--drivers/s390/kvm/Makefile9
-rw-r--r--drivers/s390/kvm/kvm_virtio.c338
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c39
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c114
-rw-r--r--drivers/scsi/FlashPoint.c2
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aha152x.c7
-rw-r--r--drivers/scsi/aha1542.c26
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h23
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.reg115
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c835
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_inline.h859
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c181
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h177
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c33
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped1145
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped1555
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_seq.h_shipped6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h55
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg45
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c16
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c676
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_inline.h616
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c95
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h142
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c73
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c9
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped233
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped6
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c6
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y105
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l19
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c25
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h1
-rw-r--r--drivers/scsi/eata.c11
-rw-r--r--drivers/scsi/esp_scsi.c35
-rw-r--r--drivers/scsi/esp_scsi.h13
-rw-r--r--drivers/scsi/hosts.c29
-rw-r--r--drivers/scsi/ide-scsi.c13
-rw-r--r--drivers/scsi/jazz_esp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/mac_esp.c657
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c394
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c19
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_proc.c7
-rw-r--r--drivers/scsi/scsi_scan.c84
-rw-r--r--drivers/scsi/scsi_sysfs.c142
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_sas.c22
-rw-r--r--drivers/scsi/scsi_transport_spi.c33
-rw-r--r--drivers/scsi/sgiwd93.c4
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/st.c10
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/u14-34f.c9
-rw-r--r--drivers/serial/68360serial.c3
-rw-r--r--drivers/serial/8250.c9
-rw-r--r--drivers/serial/8250_pci.c2
-rw-r--r--drivers/serial/atmel_serial.c1
-rw-r--r--drivers/serial/crisv10.c7
-rw-r--r--drivers/serial/dz.c2
-rw-r--r--drivers/serial/serial_core.c15
-rw-r--r--drivers/serial/vr41xx_siu.c15
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/atmel_spi.c2
-rw-r--r--drivers/spi/omap_uwire.c4
-rw-r--r--drivers/spi/pxa2xx_spi.c52
-rw-r--r--drivers/spi/spi_bitbang.c2
-rw-r--r--drivers/spi/spi_imx.c223
-rw-r--r--drivers/spi/spi_mpc83xx.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/spi/xilinx_spi.c8
-rw-r--r--drivers/video/Kconfig73
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/am200epd.c295
-rw-r--r--drivers/video/amifb.c2
-rw-r--r--drivers/video/arkfb.c32
-rw-r--r--drivers/video/atafb.c2
-rw-r--r--drivers/video/atmel_lcdfb.c74
-rw-r--r--drivers/video/aty/aty128fb.c4
-rw-r--r--drivers/video/aty/atyfb_base.c7
-rw-r--r--drivers/video/aty/mach64_ct.c16
-rw-r--r--drivers/video/aty/radeon_base.c51
-rw-r--r--drivers/video/aty/radeon_i2c.c13
-rw-r--r--drivers/video/aty/radeon_monitor.c56
-rw-r--r--drivers/video/aty/radeonfb.h20
-rw-r--r--drivers/video/bf54x-lq043fb.c8
-rw-r--r--drivers/video/cfbcopyarea.c23
-rw-r--r--drivers/video/cfbfillrect.c48
-rw-r--r--drivers/video/cfbimgblt.c54
-rw-r--r--drivers/video/cirrusfb.c6
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/console/fbcon.h12
-rw-r--r--drivers/video/fb_draw.h31
-rw-r--r--drivers/video/fbmem.c95
-rw-r--r--drivers/video/fsl-diu-fb.c1721
-rw-r--r--drivers/video/fsl-diu-fb.h223
-rw-r--r--drivers/video/geode/Kconfig20
-rw-r--r--drivers/video/geode/Makefile2
-rw-r--r--drivers/video/geode/display_gx.c125
-rw-r--r--drivers/video/geode/display_gx.h101
-rw-r--r--drivers/video/geode/gxfb.h358
-rw-r--r--drivers/video/geode/gxfb_core.c160
-rw-r--r--drivers/video/geode/lxfb.h527
-rw-r--r--drivers/video/geode/lxfb_core.c118
-rw-r--r--drivers/video/geode/lxfb_ops.c699
-rw-r--r--drivers/video/geode/suspend_gx.c267
-rw-r--r--drivers/video/geode/video_gx.c162
-rw-r--r--drivers/video/geode/video_gx.h72
-rw-r--r--drivers/video/gxt4500.c2
-rw-r--r--drivers/video/hecubafb.c302
-rw-r--r--drivers/video/imsttfb.c8
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/intelfb/intelfb.h12
-rw-r--r--drivers/video/intelfb/intelfb_i2c.c2
-rw-r--r--drivers/video/intelfb/intelfbdrv.c12
-rw-r--r--drivers/video/intelfb/intelfbhw.c16
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c36
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c16
-rw-r--r--drivers/video/matrox/matroxfb_accel.c14
-rw-r--r--drivers/video/matrox/matroxfb_base.c42
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c2
-rw-r--r--drivers/video/matrox/matroxfb_misc.c12
-rw-r--r--drivers/video/metronomefb.c328
-rw-r--r--drivers/video/modedb.c26
-rw-r--r--drivers/video/n411.c202
-rw-r--r--drivers/video/nvidia/nv_hw.c4
-rw-r--r--drivers/video/nvidia/nv_setup.c4
-rw-r--r--drivers/video/nvidia/nvidia.c9
-rw-r--r--drivers/video/offb.c15
-rw-r--r--drivers/video/pm2fb.c24
-rw-r--r--drivers/video/pm3fb.c4
-rw-r--r--drivers/video/riva/fbdev.c12
-rw-r--r--drivers/video/riva/nv_driver.c7
-rw-r--r--drivers/video/riva/riva_hw.c4
-rw-r--r--drivers/video/s3c2410fb.c6
-rw-r--r--drivers/video/s3fb.c34
-rw-r--r--drivers/video/sa1100fb.h2
-rw-r--r--drivers/video/savage/savagefb-i2c.c2
-rw-r--r--drivers/video/sis/sis.h2
-rw-r--r--drivers/video/sstfb.c10
-rw-r--r--drivers/video/stifb.c4
-rw-r--r--drivers/video/syscopyarea.c20
-rw-r--r--drivers/video/sysfillrect.c49
-rw-r--r--drivers/video/sysimgblt.c49
-rw-r--r--drivers/video/tdfxfb.c2
-rw-r--r--drivers/video/tridentfb.c14
-rw-r--r--drivers/video/uvesafb.c9
-rw-r--r--drivers/video/vermilion/vermilion.c5
-rw-r--r--drivers/video/vt8623fb.c38
-rw-r--r--drivers/video/w100fb.c6
-rw-r--r--fs/aio.c12
-rw-r--r--fs/buffer.c20
-rw-r--r--fs/dquot.c109
-rw-r--r--fs/ext2/balloc.c29
-rw-r--r--fs/ext2/dir.c20
-rw-r--r--fs/ext2/ialloc.c12
-rw-r--r--fs/ext2/inode.c15
-rw-r--r--fs/ext2/super.c27
-rw-r--r--fs/ext2/xattr.c15
-rw-r--r--fs/ext2/xip.c53
-rw-r--r--fs/ext2/xip.h9
-rw-r--r--fs/ext3/balloc.c30
-rw-r--r--fs/ext3/ext3_jbd.c12
-rw-r--r--fs/ext3/fsync.c3
-rw-r--r--fs/ext3/ialloc.c6
-rw-r--r--fs/ext3/inode.c29
-rw-r--r--fs/ext3/namei.c37
-rw-r--r--fs/ext3/resize.c71
-rw-r--r--fs/ext3/super.c53
-rw-r--r--fs/ext3/xattr.c24
-rw-r--r--fs/ext3/xattr.h7
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/fat/dir.c52
-rw-r--r--fs/fat/fatent.c9
-rw-r--r--fs/fat/file.c204
-rw-r--r--fs/fat/inode.c38
-rw-r--r--fs/gfs2/ops_address.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jbd/commit.c19
-rw-r--r--fs/jbd/journal.c18
-rw-r--r--fs/jbd/revoke.c2
-rw-r--r--fs/jbd/transaction.c38
-rw-r--r--fs/msdos/namei.c2
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/ncpfs/inode.c6
-rw-r--r--fs/ncpfs/ioctl.c17
-rw-r--r--fs/ncpfs/ncpsign_kernel.c2
-rw-r--r--fs/open.c2
-rw-r--r--fs/partitions/msdos.c20
-rw-r--r--fs/proc/proc_misc.c17
-rw-r--r--fs/proc/task_mmu.c18
-rw-r--r--fs/quota.c5
-rw-r--r--fs/quota_v1.c3
-rw-r--r--fs/quota_v2.c3
-rw-r--r--fs/reiserfs/bitmap.c8
-rw-r--r--fs/reiserfs/do_balan.c14
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/journal.c10
-rw-r--r--fs/reiserfs/namei.c8
-rw-r--r--fs/reiserfs/objectid.c7
-rw-r--r--fs/reiserfs/stree.c3
-rw-r--r--fs/reiserfs/super.c38
-rw-r--r--fs/super.c10
-rw-r--r--fs/sysfs/file.c14
-rw-r--r--fs/sysfs/group.c83
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/udf/namei.c8
-rw-r--r--fs/ufs/balloc.c4
-rw-r--r--fs/ufs/dir.c14
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/ufs/swab.h36
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--fs/vfat/namei.c37
-rw-r--r--include/asm-alpha/bug.h16
-rw-r--r--include/asm-alpha/byteorder.h2
-rw-r--r--include/asm-alpha/pgtable.h2
-rw-r--r--include/asm-arm/arch-sa1100/ide.h6
-rw-r--r--include/asm-arm/pgtable.h3
-rw-r--r--include/asm-avr32/pgtable.h8
-rw-r--r--include/asm-cris/arch-v10/ide.h11
-rw-r--r--include/asm-cris/pgtable.h4
-rw-r--r--include/asm-frv/pgtable.h2
-rw-r--r--include/asm-generic/gpio.h15
-rw-r--r--include/asm-ia64/gcc_intrin.h12
-rw-r--r--include/asm-ia64/hugetlb.h79
-rw-r--r--include/asm-ia64/kvm.h205
-rw-r--r--include/asm-ia64/kvm_host.h524
-rw-r--r--include/asm-ia64/kvm_para.h29
-rw-r--r--include/asm-ia64/page.h6
-rw-r--r--include/asm-ia64/pgtable.h3
-rw-r--r--include/asm-ia64/processor.h63
-rw-r--r--include/asm-m32r/pgtable.h10
-rw-r--r--include/asm-m68k/motorola_pgtable.h2
-rw-r--r--include/asm-m68k/sun3_pgtable.h2
-rw-r--r--include/asm-mips/pgtable.h2
-rw-r--r--include/asm-mips/vr41xx/siu.h8
-rw-r--r--include/asm-mips/vr41xx/vr41xx.h8
-rw-r--r--include/asm-mn10300/pgtable.h3
-rw-r--r--include/asm-parisc/pgtable.h2
-rw-r--r--include/asm-powerpc/hugetlb.h79
-rw-r--r--include/asm-powerpc/kvm.h53
-rw-r--r--include/asm-powerpc/kvm_asm.h55
-rw-r--r--include/asm-powerpc/kvm_host.h152
-rw-r--r--include/asm-powerpc/kvm_para.h37
-rw-r--r--include/asm-powerpc/kvm_ppc.h88
-rw-r--r--include/asm-powerpc/mmu-44x.h2
-rw-r--r--include/asm-powerpc/page_64.h7
-rw-r--r--include/asm-powerpc/pgtable-ppc32.h3
-rw-r--r--include/asm-powerpc/pgtable-ppc64.h3
-rw-r--r--include/asm-ppc/pgtable.h3
-rw-r--r--include/asm-s390/Kbuild1
-rw-r--r--include/asm-s390/kvm.h41
-rw-r--r--include/asm-s390/kvm_host.h234
-rw-r--r--include/asm-s390/kvm_para.h150
-rw-r--r--include/asm-s390/kvm_virtio.h53
-rw-r--r--include/asm-s390/lowcore.h15
-rw-r--r--include/asm-s390/mmu.h1
-rw-r--r--include/asm-s390/mmu_context.h8
-rw-r--r--include/asm-s390/pgtable.h106
-rw-r--r--include/asm-s390/setup.h1
-rw-r--r--include/asm-sh/hugetlb.h91
-rw-r--r--include/asm-sh/pgtable_32.h3
-rw-r--r--include/asm-sh/pgtable_64.h10
-rw-r--r--include/asm-sparc/pgtable.h7
-rw-r--r--include/asm-sparc64/hugetlb.h84
-rw-r--r--include/asm-sparc64/page.h2
-rw-r--r--include/asm-sparc64/pgtable.h10
-rw-r--r--include/asm-um/pgtable.h10
-rw-r--r--include/asm-x86/geode.h38
-rw-r--r--include/asm-x86/hugetlb.h91
-rw-r--r--include/asm-x86/kvm.h41
-rw-r--r--include/asm-x86/kvm_host.h99
-rw-r--r--include/asm-x86/kvm_para.h55
-rw-r--r--include/asm-x86/pgtable.h10
-rw-r--r--include/asm-x86/processor.h1
-rw-r--r--include/asm-x86/reboot.h2
-rw-r--r--include/asm-xtensa/pgtable.h4
-rw-r--r--include/linux/bitmap.h6
-rw-r--r--include/linux/bootmem.h2
-rw-r--r--include/linux/bsg.h14
-rw-r--r--include/linux/cache.h4
-rw-r--r--include/linux/capability.h3
-rw-r--r--include/linux/cpumask.h22
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/dmi.h1
-rw-r--r--include/linux/fb.h44
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/gfp.h44
-rw-r--r--include/linux/hugetlb.h46
-rw-r--r--include/linux/i2o.h5
-rw-r--r--include/linux/ide.h95
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/kprobes.h34
-rw-r--r--include/linux/kvm.h130
-rw-r--r--include/linux/kvm_host.h59
-rw-r--r--include/linux/kvm_para.h11
-rw-r--r--include/linux/kvm_types.h2
-rw-r--r--include/linux/list.h9
-rw-r--r--include/linux/memory_hotplug.h33
-rw-r--r--include/linux/mempolicy.h156
-rw-r--r--include/linux/mlx4/device.h40
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mm.h57
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mmzone.h183
-rw-r--r--include/linux/msdos_fs.h10
-rw-r--r--include/linux/ncp_fs.h7
-rw-r--r--include/linux/nodemask.h22
-rw-r--r--include/linux/notifier.h1
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags.h319
-rw-r--r--include/linux/prctl.h9
-rw-r--r--include/linux/quota.h21
-rw-r--r--include/linux/quotaops.h137
-rw-r--r--include/linux/raid/raid5.h7
-rw-r--r--include/linux/reiserfs_fs.h1
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/securebits.h25
-rw-r--r--include/linux/security.h16
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/shmem_fs.h3
-rw-r--r--include/linux/suspend.h15
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/synclink.h4
-rw-r--r--include/linux/sysfs.h4
-rw-r--r--include/linux/vmalloc.h5
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/net/compat.h3
-rw-r--r--include/scsi/scsi_device.h3
-rw-r--r--include/video/atmel_lcdc.h11
-rw-r--r--include/video/hecubafb.h51
-rw-r--r--include/video/metronomefb.h62
-rw-r--r--init/Kconfig6
-rw-r--r--ipc/shm.c6
-rw-r--r--kernel/bounds.c23
-rw-r--r--kernel/cpuset.c22
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/hrtimer.c15
-rw-r--r--kernel/kexec.c3
-rw-r--r--kernel/kprobes.c349
-rw-r--r--kernel/power/console.c27
-rw-r--r--kernel/sys.c27
-rw-r--r--lib/bitmap.c158
-rw-r--r--lib/radix-tree.c9
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/bootmem.c32
-rw-r--r--mm/dmapool.c12
-rw-r--r--mm/fadvise.c2
-rw-r--r--mm/filemap.c10
-rw-r--r--mm/filemap_xip.c200
-rw-r--r--mm/hugetlb.c78
-rw-r--r--mm/internal.h3
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memory.c228
-rw-r--r--mm/memory_hotplug.c184
-rw-r--r--mm/mempolicy.c1051
-rw-r--r--mm/mincore.c2
-rw-r--r--mm/mmap.c33
-rw-r--r--mm/mmzone.c30
-rw-r--r--mm/nommu.c6
-rw-r--r--mm/oom_kill.c58
-rw-r--r--mm/page_alloc.c274
-rw-r--r--mm/pagewalk.c8
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/shmem.c144
-rw-r--r--mm/slab.c17
-rw-r--r--mm/slub.c18
-rw-r--r--mm/sparse.c108
-rw-r--r--mm/swap.c37
-rw-r--r--mm/swapfile.c8
-rw-r--r--mm/truncate.c11
-rw-r--r--mm/vmalloc.c141
-rw-r--r--mm/vmscan.c46
-rw-r--r--mm/vmstat.c11
-rw-r--r--net/can/raw.c21
-rw-r--r--net/compat.c117
-rw-r--r--net/ipv4/ip_sockglue.c5
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c5
-rw-r--r--net/mac80211/Kconfig4
-rw-r--r--net/mac80211/mesh.h1
-rw-r--r--net/mac80211/mesh_hwmp.c1
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/tipc/msg.h7
-rw-r--r--net/xfrm/xfrm_algo.c3
-rw-r--r--net/xfrm/xfrm_state.c2
-rwxr-xr-xscripts/kernel-doc25
-rw-r--r--security/capability.c1
-rw-r--r--security/commoncap.c103
-rw-r--r--security/dummy.c2
-rw-r--r--security/root_plug.c1
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c5
-rw-r--r--security/smack/smack_lsm.c3
-rw-r--r--security/smack/smackfs.c4
-rw-r--r--virt/kvm/kvm_main.c230
-rw-r--r--virt/kvm/kvm_trace.c276
752 files changed, 42403 insertions, 14501 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 488dd4a4945b..617c2d979975 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -645,4 +645,58 @@ X!Idrivers/video/console/fonts.c
!Edrivers/i2c/i2c-core.c
</chapter>
+ <chapter id="clk">
+ <title>Clock Framework</title>
+
+ <para>
+ The clock framework defines programming interfaces to support
+ software management of the system clock tree.
+ This framework is widely used with System-On-Chip (SOC) platforms
+ to support power management and various devices which may need
+ custom clock rates.
+     Note that these "clocks" don't relate to timekeeping or real
+     time clocks (RTCs), each of which has a separate framework.
+ These <structname>struct clk</structname> instances may be used
+     to manage, for example, a 96 MHz signal that is used to shift bits
+ into and out of peripherals or busses, or otherwise trigger
+ synchronous state machine transitions in system hardware.
+ </para>
+
+ <para>
+ Power management is supported by explicit software clock gating:
+ unused clocks are disabled, so the system doesn't waste power
+ changing the state of transistors that aren't in active use.
+ On some systems this may be backed by hardware clock gating,
+ where clocks are gated without being disabled in software.
+ Sections of chips that are powered but not clocked may be able
+ to retain their last state.
+ This low power state is often called a <emphasis>retention
+ mode</emphasis>.
+ This mode still incurs leakage currents, especially with finer
+ circuit geometries, but for CMOS circuits power is mostly used
+ by clocked state changes.
+ </para>
+
+ <para>
+ Power-aware drivers only enable their clocks when the device
+ they manage is in active use. Also, system sleep states often
+ differ according to which clock domains are active: while a
+ "standby" state may allow wakeup from several active domains, a
+ "mem" (suspend-to-RAM) state may require a more wholesale shutdown
+ of clocks derived from higher speed PLLs and oscillators, limiting
+ the number of possible wakeup event sources. A driver's suspend
+ method may need to be aware of system-specific clock constraints
+ on the target sleep state.
+ </para>
+
+ <para>
+ Some platforms support programmable clock generators. These
+ can be used by external chips of various kinds, such as other
+ CPUs, multimedia codecs, and devices with strict requirements
+ for interface clocking.
+ </para>
+
+!Iinclude/linux/clk.h
+ </chapter>
+
</book>
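
A minimal consumer-side sketch of the clk API described in the chapter
above, assuming a platform that names the device clock "pclk" (the
example_* names are hypothetical; clk_get/clk_enable/clk_disable/clk_put
are the calls declared in include/linux/clk.h):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static struct clk *example_clk;

	static int example_start(struct device *dev)
	{
		/* look up this device's clock; "pclk" is platform-specific */
		example_clk = clk_get(dev, "pclk");
		if (IS_ERR(example_clk))
			return PTR_ERR(example_clk);

		/* ungate the clock only while the device is in active use */
		return clk_enable(example_clk);
	}

	static void example_stop(void)
	{
		clk_disable(example_clk);	/* allow gating again */
		clk_put(example_clk);
	}
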
diff --git a/Documentation/fb/gxfb.txt b/Documentation/fb/gxfb.txt
new file mode 100644
index 000000000000..2f640903bbb2
--- /dev/null
+++ b/Documentation/fb/gxfb.txt
@@ -0,0 +1,52 @@
+[This file is cloned from VesaFB/aty128fb]
+
+What is gxfb?
+=============
+
+This is a graphics framebuffer driver for AMD Geode GX2 based processors.
+
+Advantages:
+
+ * No need to use AMD's VSA code (or other VESA emulation layer) in the
+ BIOS.
+ * It provides a nice large console (128 cols + 48 lines with 1024x768)
+ without using tiny, unreadable fonts.
+ * You can run XF68_FBDev on top of /dev/fb0
+ * Most important: boot logo :-)
+
+Disadvantages:
+
+ * graphic mode is slower than text mode...
+
+
+How to use it?
+==============
+
+Switching modes is done using the gxfb.mode_option=<resolution>... boot
+parameter or using the `fbset' program.
+
+See Documentation/fb/modedb.txt for more information on modedb
+resolutions.
+
+
+X11
+===
+
+XF68_FBDev should generally work fine, but it is non-accelerated.
+
+
+Configuration
+=============
+
+You can pass kernel command line options to gxfb with gxfb.<option>.
+For example, gxfb.mode_option=800x600@75.
+Accepted options:
+
+mode_option - specify the video mode. Of the form
+ <x>x<y>[-<bpp>][@<refresh>]
+vram - size of video ram (normally auto-detected)
+vt_switch - enable vt switching during suspend/resume. The vt
+ switch is slow, but harmless.
+
+--
+Andres Salomon <dilinger@debian.org>
diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
index da5ee74219e8..27a3160650a4 100644
--- a/Documentation/fb/intelfb.txt
+++ b/Documentation/fb/intelfb.txt
@@ -14,6 +14,8 @@ graphics devices. These would include:
Intel 915GM
Intel 945G
Intel 945GM
+ Intel 965G
+ Intel 965GM
B. List of available options
diff --git a/Documentation/fb/lxfb.txt b/Documentation/fb/lxfb.txt
new file mode 100644
index 000000000000..38b3ca6f6ca7
--- /dev/null
+++ b/Documentation/fb/lxfb.txt
@@ -0,0 +1,52 @@
+[This file is cloned from VesaFB/aty128fb]
+
+What is lxfb?
+=============
+
+This is a graphics framebuffer driver for AMD Geode LX based processors.
+
+Advantages:
+
+ * No need to use AMD's VSA code (or other VESA emulation layer) in the
+ BIOS.
+ * It provides a nice large console (128 cols + 48 lines with 1024x768)
+ without using tiny, unreadable fonts.
+ * You can run XF68_FBDev on top of /dev/fb0
+ * Most important: boot logo :-)
+
+Disadvantages:
+
+ * graphic mode is slower than text mode...
+
+
+How to use it?
+==============
+
+Switching modes is done using the lxfb.mode_option=<resolution>... boot
+parameter or using the `fbset' program.
+
+See Documentation/fb/modedb.txt for more information on modedb
+resolutions.
+
+
+X11
+===
+
+XF68_FBDev should generally work fine, but it is non-accelerated.
+
+
+Configuration
+=============
+
+You can pass kernel command line options to lxfb with lxfb.<option>.
+For example, lxfb.mode_option=800x600@75.
+Accepted options:
+
+mode_option - specify the video mode. Of the form
+ <x>x<y>[-<bpp>][@<refresh>]
+vram - size of video ram (normally auto-detected)
+vt_switch - enable vt switching during suspend/resume. The vt
+ switch is slow, but harmless.
+
+--
+Andres Salomon <dilinger@debian.org>
diff --git a/Documentation/fb/metronomefb.txt b/Documentation/fb/metronomefb.txt
index b9a2e7b7e838..237ca412582d 100644
--- a/Documentation/fb/metronomefb.txt
+++ b/Documentation/fb/metronomefb.txt
@@ -1,7 +1,7 @@
Metronomefb
-----------
Maintained by Jaya Kumar <jayakumar.lkml.gmail.com>
-Last revised: Nov 20, 2007
+Last revised: Mar 10, 2008
Metronomefb is a driver for the Metronome display controller. The controller
is from E-Ink Corporation. It is intended to be used to drive the E-Ink
@@ -11,20 +11,18 @@ display media here http://www.e-ink.com/products/matrix/metronome.html .
Metronome is interfaced to the host CPU through the AMLCD interface. The
host CPU generates the control information and the image in a framebuffer
which is then delivered to the AMLCD interface by a host specific method.
-Currently, that's implemented for the PXA's LCDC controller. The display and
-error status are each pulled through individual GPIOs.
+The display and error status are each pulled through individual GPIOs.
-Metronomefb was written for the PXA255/gumstix/lyre combination and
-therefore currently has board set specific code in it. If other boards based on
-other architectures are available, then the host specific code can be separated
-and abstracted out.
+Metronomefb is platform-independent and depends on a board-specific driver
+to do all physical I/O work. Currently, an example is implemented for the
+PXA board used in the AM-200 EPD devkit; see am200epd.c.
Metronomefb requires waveform information which is delivered via the AMLCD
interface to the metronome controller. The waveform information is expected to
be delivered from userspace via the firmware class interface. The waveform file
can be compressed as long as your udev or hotplug script is aware of the need
-to uncompress it before delivering it. metronomefb will ask for waveform.wbf
-which would typically go into /lib/firmware/waveform.wbf depending on your
+to uncompress it before delivering it. metronomefb will ask for metronome.wbf
+which would typically go into /lib/firmware/metronome.wbf depending on your
udev/hotplug setup. I have only tested with a single waveform file which was
originally labeled 23P01201_60_WT0107_MTC. I do not know what it stands for.
Caution should be exercised when manipulating the waveform as there may be
diff --git a/Documentation/fb/modedb.txt b/Documentation/fb/modedb.txt
index 4fcdb4cf4cca..ec4dee75a354 100644
--- a/Documentation/fb/modedb.txt
+++ b/Documentation/fb/modedb.txt
@@ -125,8 +125,12 @@ There may be more modes.
amifb - Amiga chipset frame buffer
aty128fb - ATI Rage128 / Pro frame buffer
atyfb - ATI Mach64 frame buffer
+ pm2fb - Permedia 2/2V frame buffer
+ pm3fb - Permedia 3 frame buffer
+ sstfb - Voodoo 1/2 (SST1) chipset frame buffer
tdfxfb - 3D Fx frame buffer
tridentfb - Trident (Cyber)blade chipset frame buffer
+ vt8623fb - VIA 8623 frame buffer
BTW, only a few drivers use this at the moment. Others are to follow
(feel free to send patches).
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 448729fcaeb1..599fe55bf297 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -128,15 +128,6 @@ Who: Arjan van de Ven <arjan@linux.intel.com>
---------------------------
-What: vm_ops.nopage
-When: Soon, provided in-kernel callers have been converted
-Why: This interface is replaced by vm_ops.fault, but it has been around
- forever, is used by a lot of drivers, and doesn't cost much to
- maintain.
-Who: Nick Piggin <npiggin@suse.de>
-
----------------------------
-
What: PHYSDEVPATH, PHYSDEVBUS, PHYSDEVDRIVER in the uevent environment
When: October 2008
Why: The stacking of class devices makes these values misleading and
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 42d4b30b1045..c2992bc54f2f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -511,7 +511,6 @@ prototypes:
void (*open)(struct vm_area_struct*);
void (*close)(struct vm_area_struct*);
int (*fault)(struct vm_area_struct*, struct vm_fault *);
- struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
int (*page_mkwrite)(struct vm_area_struct *, struct page *);
locking rules:
@@ -519,7 +518,6 @@ locking rules:
open: no yes
close: no yes
fault: no yes
-nopage: no yes
page_mkwrite: no yes no
->page_mkwrite() is called when a previously read-only page is
@@ -537,4 +535,3 @@ NULL.
ipc/shm.c::shm_delete() - may need BKL.
->read() and ->write() in many drivers are (probably) missing BKL.
-drivers/sgi/char/graphics.c::sgi_graphics_nopage() - may need BKL.
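
A minimal sketch of a ->fault() method matching the prototype in the
hunk above; the backing store here is a placeholder (a real handler
would look up the page for vmf->pgoff in its own cache):

	#include <linux/mm.h>

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page;

		/* find or create the page backing offset vmf->pgoff */
		page = alloc_page(GFP_KERNEL);	/* placeholder backing store */
		if (!page)
			return VM_FAULT_OOM;

		vmf->page = page;	/* hand the referenced page to the fault path */
		return 0;
	}
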
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index 145e44086358..222437efd75a 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -92,6 +92,18 @@ NodeList format is a comma-separated list of decimal numbers and ranges,
a range being two hyphen-separated decimal numbers, the smallest and
largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
+NUMA memory allocation policies have optional flags that can be used in
+conjunction with their modes. These optional flags can be specified
+when tmpfs is mounted by appending them to the mode before the NodeList.
+See Documentation/vm/numa_memory_policy.txt for a list of all available
+memory allocation policy mode flags.
+
+ =static is equivalent to MPOL_F_STATIC_NODES
+ =relative is equivalent to MPOL_F_RELATIVE_NODES
+
+For example, mpol=bind=static:NodeList is the equivalent of an
+allocation policy of MPOL_BIND | MPOL_F_STATIC_NODES.
+
Note that trying to mount a tmpfs with an mpol option will fail if the
running kernel does not support NUMA; and will fail if its nodelist
specifies a node which is not online. If your system relies on that
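
Putting the pieces together, a mount that combines a policy mode with the
=static flag and a NodeList might look like this (the mount point and
size here are arbitrary):

	mount -t tmpfs -o size=512m,mpol=bind=static:0-3 tmpfs /mnt
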
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index fcc123ffa252..2d5e1e582e13 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -17,6 +17,21 @@ dmask=### -- The permission mask for the directory.
fmask=### -- The permission mask for files.
The default is the umask of current process.
+allow_utime=### -- This option controls the permission check of mtime/atime.
+
+                 20 - If the current process is in the file's owning group,
+                      it can change the timestamp.
+                  2 - Any other user can change the timestamp.
+
+ The default is set from `dmask' option. (If the directory is
+ writable, utime(2) is also allowed. I.e. ~dmask & 022)
+
+                 Normally utime(2) checks that the current process is the
+                 owner of the file, or that it has the CAP_FOWNER
+                 capability. But the FAT filesystem doesn't store uid/gid
+                 on disk, so the normal check is too inflexible. With this
+                 option you can relax it (a worked example follows this diff).
+
codepage=### -- Sets the codepage number for converting to shortname
characters on FAT filesystem.
By default, FAT_DEFAULT_CODEPAGE setting is used.
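
As a worked example of the allow_utime default (~dmask & 022):

	dmask=022  ->  ~022 & 022 = 000  (only the owner, or a process with
	                                  CAP_FOWNER, may update timestamps)
	dmask=002  ->  ~002 & 022 = 020  (members of the file's group may
	                                  also update timestamps)
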
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 54630095aa3c..c35ca9e40d4c 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -107,6 +107,16 @@ type of GPIO controller, and on one particular board 80-95 with an FPGA.
The numbers need not be contiguous; either of those platforms could also
use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
+If you want to initialize a structure with an invalid GPIO number, use
+some negative number (perhaps "-EINVAL"); that will never be valid. To
+test if a number could reference a GPIO, you may use this predicate:
+
+ int gpio_is_valid(int number);
+
+A number that's not valid will be rejected by calls which may request
+or free GPIOs (see below). Other numbers may also be rejected; for
+example, a number might be valid but unused on a given board.
+
Whether a platform supports multiple GPIO controllers is currently a
platform-specific implementation issue.
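
A minimal sketch of using this predicate before claiming a line, assuming
the GPIOLIB framework (gpio_request() is the request call mentioned above;
the function name is hypothetical):

	#include <asm/gpio.h>

	static int example_claim(int gpio)	/* gpio may be -EINVAL if unset */
	{
		if (!gpio_is_valid(gpio))
			return -EINVAL;
		return gpio_request(gpio, "example");
	}
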
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
new file mode 100644
index 000000000000..bec9d815da33
--- /dev/null
+++ b/Documentation/ia64/kvm.txt
@@ -0,0 +1,82 @@
+Currently, the kvm module is in the EXPERIMENTAL stage on IA64. This means
+that interfaces are not stable enough to use, so please don't run critical
+applications in a virtual machine. We will try our best to make it robust
+in future versions!
+ Guide: How to boot up guests on kvm/ia64
+
+This guide describes how to enable kvm support on IA-64 systems.
+
+1. Get the kvm source from git.kernel.org.
+ Userspace source:
+ git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
+ Kernel Source:
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
+
+2. Compile the source code.
+ 2.1 Compile userspace code:
+ (1)cd ./kvm-userspace
+ (2)./configure
+ (3)cd kernel
+	(4)make sync LINUX=$kernel_dir (kernel_dir is the directory of the kernel source.)
+ (5)cd ..
+ (6)make qemu
+ (7)cd qemu; make install
+
+ 2.2 Compile kernel source code:
+ (1) cd ./$kernel_dir
+	(2) make menuconfig
+	(3) Enter the Virtualization menu, and choose kvm.
+	(4) make
+	(5) Once (4) is done, make modules_install
+	(6) Make an initrd, and reboot the host machine with the new kernel.
+	(7) Once (6) is done, cd $kernel_dir/arch/ia64/kvm
+ (8) insmod kvm.ko; insmod kvm-intel.ko
+
+Note: For step 2, please make sure that the host page size == TARGET_PAGE_SIZE of qemu; otherwise, it may fail.
+
+3. Get the guest firmware, named Flash.fd, and put it in the right place:
+ (1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
+
+	(2) If you have no firmware at hand, please download its source with
+	hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
+	You can find the firmware binary in the efi-vfirmware.hg/binaries directory.
+
+	(3) Rename the firmware you own to Flash.fd, and copy it to /usr/local/share/qemu
+
+4. Boot up Linux or Windows guests:
+	4.1 Create or install an image for guest boot. If you have xen experience, it should be easy.
+
+	4.2 Boot up guests using the following command:
+	/usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
+	(xx is the number of virtual processors for the guest; the current maximum is 4)
+
+5. Known possible issues on some platforms with old firmware.
+
+If you meet strange host crash issues, try to solve them in either of the following ways:
+
+(1): Upgrade your Firmware to the latest one.
+
+(2): Apply the patch below to the kernel source.
+diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
+index 0b53344..f02b0f7 100644
+--- a/arch/ia64/kernel/pal.S
++++ b/arch/ia64/kernel/pal.S
+@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+- srlz.d // seralize restoration of psr.l
++ srlz.i // seralize restoration of psr.l
++ ;;
+ br.ret.sptk.many b0
+ END(ia64_pal_call_static)
+
+6. Bug report:
+	If you find any issues when using kvm/ia64, please post the bug info to the kvm-ia64-devel mailing list.
+ https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
+
+Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
+
+
+ Xiantao Zhang <xiantao.zhang@intel.com>
+ 2008.3.10
diff --git a/Documentation/ide/ide-tape.txt b/Documentation/ide/ide-tape.txt
index 658f271a373f..3f348a0b21d8 100644
--- a/Documentation/ide/ide-tape.txt
+++ b/Documentation/ide/ide-tape.txt
@@ -1,146 +1,65 @@
-/*
- * IDE ATAPI streaming tape driver.
- *
- * This driver is a part of the Linux ide driver.
- *
- * The driver, in co-operation with ide.c, basically traverses the
- * request-list for the block device interface. The character device
- * interface, on the other hand, creates new requests, adds them
- * to the request-list of the block device, and waits for their completion.
- *
- * Pipelined operation mode is now supported on both reads and writes.
- *
- * The block device major and minor numbers are determined from the
- * tape's relative position in the ide interfaces, as explained in ide.c.
- *
- * The character device interface consists of the following devices:
- *
- * ht0 major 37, minor 0 first IDE tape, rewind on close.
- * ht1 major 37, minor 1 second IDE tape, rewind on close.
- * ...
- * nht0 major 37, minor 128 first IDE tape, no rewind on close.
- * nht1 major 37, minor 129 second IDE tape, no rewind on close.
- * ...
- *
- * The general magnetic tape commands compatible interface, as defined by
- * include/linux/mtio.h, is accessible through the character device.
- *
- * General ide driver configuration options, such as the interrupt-unmask
- * flag, can be configured by issuing an ioctl to the block device interface,
- * as any other ide device.
- *
- * Our own ide-tape ioctl's can be issued to either the block device or
- * the character device interface.
- *
- * Maximal throughput with minimal bus load will usually be achieved in the
- * following scenario:
- *
- * 1. ide-tape is operating in the pipelined operation mode.
- * 2. No buffering is performed by the user backup program.
- *
- * Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
- *
- * Here are some words from the first releases of hd.c, which are quoted
- * in ide.c and apply here as well:
- *
- * | Special care is recommended. Have Fun!
- *
- *
- * An overview of the pipelined operation mode.
- *
- * In the pipelined write mode, we will usually just add requests to our
- * pipeline and return immediately, before we even start to service them. The
- * user program will then have enough time to prepare the next request while
- * we are still busy servicing previous requests. In the pipelined read mode,
- * the situation is similar - we add read-ahead requests into the pipeline,
- * before the user even requested them.
- *
- * The pipeline can be viewed as a "safety net" which will be activated when
- * the system load is high and prevents the user backup program from keeping up
- * with the current tape speed. At this point, the pipeline will get
- * shorter and shorter but the tape will still be streaming at the same speed.
- * Assuming we have enough pipeline stages, the system load will hopefully
- * decrease before the pipeline is completely empty, and the backup program
- * will be able to "catch up" and refill the pipeline again.
- *
- * When using the pipelined mode, it would be best to disable any type of
- * buffering done by the user program, as ide-tape already provides all the
- * benefits in the kernel, where it can be done in a more efficient way.
- * As we will usually not block the user program on a request, the most
- * efficient user code will then be a simple read-write-read-... cycle.
- * Any additional logic will usually just slow down the backup process.
- *
- * Using the pipelined mode, I get a constant over 400 KBps throughput,
- * which seems to be the maximum throughput supported by my tape.
- *
- * However, there are some downfalls:
- *
- * 1. We use memory (for data buffers) in proportional to the number
- * of pipeline stages (each stage is about 26 KB with my tape).
- * 2. In the pipelined write mode, we cheat and postpone error codes
- * to the user task. In read mode, the actual tape position
- * will be a bit further than the last requested block.
- *
- * Concerning (1):
- *
- * 1. We allocate stages dynamically only when we need them. When
- * we don't need them, we don't consume additional memory. In
- * case we can't allocate stages, we just manage without them
- * (at the expense of decreased throughput) so when Linux is
- * tight in memory, we will not pose additional difficulties.
- *
- * 2. The maximum number of stages (which is, in fact, the maximum
- * amount of memory) which we allocate is limited by the compile
- * time parameter IDETAPE_MAX_PIPELINE_STAGES.
- *
- * 3. The maximum number of stages is a controlled parameter - We
- * don't start from the user defined maximum number of stages
- * but from the lower IDETAPE_MIN_PIPELINE_STAGES (again, we
- * will not even allocate this amount of stages if the user
- * program can't handle the speed). We then implement a feedback
- * loop which checks if the pipeline is empty, and if it is, we
- * increase the maximum number of stages as necessary until we
- * reach the optimum value which just manages to keep the tape
- * busy with minimum allocated memory or until we reach
- * IDETAPE_MAX_PIPELINE_STAGES.
- *
- * Concerning (2):
- *
- * In pipelined write mode, ide-tape can not return accurate error codes
- * to the user program since we usually just add the request to the
- * pipeline without waiting for it to be serviced. In case an error
- * occurs, I will report it on the next user request.
- *
- * In the pipelined read mode, subsequent read requests or forward
- * filemark spacing will perform correctly, as we preserve all blocks
- * and filemarks which we encountered during our excess read-ahead.
- *
- * For accurate tape positioning and error reporting, disabling
- * pipelined mode might be the best option.
- *
- * You can enable/disable/tune the pipelined operation mode by adjusting
- * the compile time parameters below.
- *
- *
- * Possible improvements.
- *
- * 1. Support for the ATAPI overlap protocol.
- *
- * In order to maximize bus throughput, we currently use the DSC
- * overlap method which enables ide.c to service requests from the
- * other device while the tape is busy executing a command. The
- * DSC overlap method involves polling the tape's status register
- * for the DSC bit, and servicing the other device while the tape
- * isn't ready.
- *
- * In the current QIC development standard (December 1995),
- * it is recommended that new tape drives will *in addition*
- * implement the ATAPI overlap protocol, which is used for the
- * same purpose - efficient use of the IDE bus, but is interrupt
- * driven and thus has much less CPU overhead.
- *
- * ATAPI overlap is likely to be supported in most new ATAPI
- * devices, including new ATAPI cdroms, and thus provides us
- * a method by which we can achieve higher throughput when
- * sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
- */
+IDE ATAPI streaming tape driver.
+
+This driver is a part of the Linux ide driver.
+
+The driver, in co-operation with ide.c, basically traverses the
+request-list for the block device interface. The character device
+interface, on the other hand, creates new requests, adds them
+to the request-list of the block device, and waits for their completion.
+
+The block device major and minor numbers are determined from the
+tape's relative position in the ide interfaces, as explained in ide.c.
+
+The character device interface consists of the following devices:
+
+ht0 major 37, minor 0 first IDE tape, rewind on close.
+ht1 major 37, minor 1 second IDE tape, rewind on close.
+...
+nht0 major 37, minor 128 first IDE tape, no rewind on close.
+nht1 major 37, minor 129 second IDE tape, no rewind on close.
+...
+
+The general magnetic tape commands compatible interface, as defined by
+include/linux/mtio.h, is accessible through the character device.
+
+General ide driver configuration options, such as the interrupt-unmask
+flag, can be configured by issuing an ioctl to the block device interface,
+as any other ide device.
+
+Our own ide-tape ioctl's can be issued to either the block device or
+the character device interface.
+
+Maximal throughput with minimal bus load will usually be achieved in the
+following scenario:
+
+ 1. ide-tape is operating in the pipelined operation mode.
+ 2. No buffering is performed by the user backup program.
+
+Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
+
+Here are some words from the first releases of hd.c, which are quoted
+in ide.c and apply here as well:
+
+| Special care is recommended. Have Fun!
+
+Possible improvements:
+
+1. Support for the ATAPI overlap protocol.
+
+In order to maximize bus throughput, we currently use the DSC
+overlap method which enables ide.c to service requests from the
+other device while the tape is busy executing a command. The
+DSC overlap method involves polling the tape's status register
+for the DSC bit, and servicing the other device while the tape
+isn't ready.
+
+In the current QIC development standard (December 1995),
+it is recommended that new tape drives will *in addition*
+implement the ATAPI overlap protocol, which is used for the
+same purpose - efficient use of the IDE bus, but is interrupt
+driven and thus has much less CPU overhead.
+
+ATAPI overlap is likely to be supported in most new ATAPI
+devices, including new ATAPI cdroms, and thus provides us
+a method by which we can achieve higher throughput when
+sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 486c699f4aea..0c78f4b1d9d9 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -82,27 +82,26 @@ Drives are normally found by auto-probing and/or examining the CMOS/BIOS data.
For really weird situations, the apparent (fdisk) geometry can also be specified
on the kernel "command line" using LILO. The format of such lines is:
- hdx=cyls,heads,sects
-or hdx=cdrom
+ ide_core.chs=[interface_number.device_number]:cyls,heads,sects
+or ide_core.cdrom=[interface_number.device_number]
-where hdx can be any of hda through hdh, Three values are required
-(cyls,heads,sects). For example:
+For example:
- hdc=1050,32,64 hdd=cdrom
+ ide_core.chs=1.0:1050,32,64 ide_core.cdrom=1.1
-either {hda,hdb} or {hdc,hdd}. The results of successful auto-probing may
-override the physical geometry/irq specified, though the "original" geometry
-may be retained as the "logical" geometry for partitioning purposes (fdisk).
+The results of successful auto-probing may override the physical geometry/irq
+specified, though the "original" geometry may be retained as the "logical"
+geometry for partitioning purposes (fdisk).
If the auto-probing during boot time confuses a drive (ie. the drive works
with hd.c but not with ide.c), then an command line option may be specified
for each drive for which you'd like the drive to skip the hardware
probe/identification sequence. For example:
- hdb=noprobe
+ ide_core.noprobe=0.1
or
- hdc=768,16,32
- hdc=noprobe
+ ide_core.chs=1.0:768,16,32
+ ide_core.noprobe=1.0
Note that when only one IDE device is attached to an interface, it should be
jumpered as "single" or "master", *not* "slave". Many folks have had
@@ -118,9 +117,9 @@ If for some reason your cdrom drive is *not* found at boot time, you can force
the probe to look harder by supplying a kernel command line parameter
via LILO, such as:
- hdc=cdrom /* hdc = "master" on second interface */
+ ide_core.cdrom=1.0 /* "master" on second interface (hdc) */
or
- hdd=cdrom /* hdd = "slave" on second interface */
+ ide_core.cdrom=1.1 /* "slave" on second interface (hdd) */
For example, a GW2000 system might have a hard drive on the primary
interface (/dev/hda) and an IDE cdrom drive on the secondary interface
@@ -174,9 +173,7 @@ to /etc/modprobe.conf.
When ide.c is used as a module, you can pass command line parameters to the
driver using the "options=" keyword to insmod, while replacing any ',' with
-';'. For example:
-
- insmod ide.o options="hda=nodma hdb=nodma"
+';'.
================================================================================
@@ -184,57 +181,6 @@ driver using the "options=" keyword to insmod, while replacing any ',' with
Summary of ide driver parameters for kernel command line
--------------------------------------------------------
- "hdx=" is recognized for all "x" from "a" to "u", such as "hdc".
-
- "idex=" is recognized for all "x" from "0" to "9", such as "ide1".
-
- "hdx=noprobe" : drive may be present, but do not probe for it
-
- "hdx=none" : drive is NOT present, ignore cmos and do not probe
-
- "hdx=nowerr" : ignore the WRERR_STAT bit on this drive
-
- "hdx=cdrom" : drive is present, and is a cdrom drive
-
- "hdx=cyl,head,sect" : disk drive is present, with specified geometry
-
- "hdx=autotune" : driver will attempt to tune interface speed
- to the fastest PIO mode supported,
- if possible for this drive only.
- Not fully supported by all chipset types,
- and quite likely to cause trouble with
- older/odd IDE drives.
-
- "hdx=nodma" : disallow DMA
-
- "idebus=xx" : inform IDE driver of VESA/PCI bus speed in MHz,
- where "xx" is between 20 and 66 inclusive,
- used when tuning chipset PIO modes.
- For PCI bus, 25 is correct for a P75 system,
- 30 is correct for P90,P120,P180 systems,
- and 33 is used for P100,P133,P166 systems.
- If in doubt, use idebus=33 for PCI.
- As for VLB, it is safest to not specify it.
- Bigger values are safer than smaller ones.
-
- "idex=serialize" : do not overlap operations on idex. Please note
- that you will have to specify this option for
- both the respective primary and secondary channel
- to take effect.
-
- "idex=reset" : reset interface after probe
-
- "idex=ata66" : informs the interface that it has an 80c cable
- for chipsets that are ATA-66 capable, but the
- ability to bit test for detection is currently
- unknown.
-
- "ide=doubler" : probe/support IDE doublers on Amiga
-
-There may be more options than shown -- use the source, Luke!
-
-Everything else is rejected with a "BAD OPTION" message.
-
For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672)
you need to explicitly enable probing by using "probe" kernel parameter,
i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
@@ -251,6 +197,33 @@ are detected automatically).
You also need to use "probe" kernel parameter for ide-4drives driver
(support for IDE generic chipset with four drives on one port).
+To enable support for IDE doublers on Amiga use "doubler" kernel parameter
+for gayle host driver (i.e. "gayle.doubler" if the driver is built-in).
+
+To force ignoring cable detection (this should be needed only if you're using
+a short 40-wire cable which cannot be automatically detected - if this is not
+the case please report it as a bug instead) use the "ignore_cable" kernel parameter:
+
+* "ide_core.ignore_cable=[interface_number]" boot option if IDE is built-in
+ (i.e. "ide_core.ignore_cable=1" to force ignoring cable for "ide1")
+
+* "ignore_cable=[interface_number]" module parameter (for ide_core module)
+ if IDE is compiled as module
+
+Other kernel parameters for ide_core are listed below (a combined example
+follows the list):
+
+* "nodma=[interface_number.device_number]" to disallow DMA for a device
+
+* "noflush=[interface_number.device_number]" to disable flush requests
+
+* "noprobe=[interface_number.device_number]" to skip probing
+
+* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit
+
+* "cdrom=[interface_number.device_number]" to force device as a CD-ROM
+
+* "chs=[interface_number.device_number]" to force device as a disk (using CHS)
+
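+For example, a hypothetical boot line combining several of these options
+(using the interface_number.device_number notation described above) might be:
+
+	ide_core.nodma=0.1 ide_core.cdrom=1.0 ide_core.chs=0.0:1050,32,64
+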
================================================================================
Some Terminology
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index c18363bd8d11..240ce7a56c40 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -183,6 +183,8 @@ Code Seq# Include File Comments
0xAC 00-1F linux/raw.h
0xAD 00 Netfilter device in development:
<mailto:rusty@rustcorp.com.au>
+0xAE all linux/kvm.h Kernel-based Virtual Machine
+ <mailto:kvm-devel@lists.sourceforge.net>
0xB0 all RATIO devices in development:
<mailto:vgo@ratio.de>
0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bf6303ec0bde..e5f3d918316f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -772,10 +772,6 @@ and is between 256 and 4096 characters. It is defined in the file
Format: ide=nodma or ide=doubler
See Documentation/ide/ide.txt.
- ide?= [HW] (E)IDE subsystem
- Format: ide?=ata66 or chipset specific parameters.
- See Documentation/ide/ide.txt.
-
idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed
See Documentation/ide/ide.txt.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index be89f393274f..6877e7187113 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -37,6 +37,11 @@ registration function such as register_kprobe() specifies where
the probe is to be inserted and what handler is to be called when
the probe is hit.
+There are also register_/unregister_*probes() functions for batch
+registration/unregistration of a group of *probes. These functions
+can speed up the unregistration process when you have to unregister
+a lot of probes at once.
+
The next three subsections explain how the different types of
probes work. They explain certain things that you'll need to
know in order to make the best use of Kprobes -- e.g., the
@@ -190,10 +195,11 @@ code mapping.
4. API Reference
The Kprobes API includes a "register" function and an "unregister"
-function for each type of probe. Here are terse, mini-man-page
-specifications for these functions and the associated probe handlers
-that you'll write. See the files in the samples/kprobes/ sub-directory
-for examples.
+function for each type of probe. The API also includes "register_*probes"
+and "unregister_*probes" functions for (un)registering arrays of probes.
+Here are terse, mini-man-page specifications for these functions and
+the associated probe handlers that you'll write. See the files in the
+samples/kprobes/ sub-directory for examples.
4.1 register_kprobe
@@ -319,6 +325,43 @@ void unregister_kretprobe(struct kretprobe *rp);
Removes the specified probe. The unregister function can be called
at any time after the probe has been registered.
+NOTE:
+If the functions find an incorrect probe (e.g., an unregistered probe),
+they clear the addr field of the probe.
+
+4.5 register_*probes
+
+#include <linux/kprobes.h>
+int register_kprobes(struct kprobe **kps, int num);
+int register_kretprobes(struct kretprobe **rps, int num);
+int register_jprobes(struct jprobe **jps, int num);
+
+Registers each of the num probes in the specified array. If any
+error occurs during registration, all probes in the array, up to
+the bad probe, are safely unregistered before the register_*probes
+function returns.
+- kps/rps/jps: an array of pointers to *probe data structures
+- num: the number of array entries.
+
+NOTE:
+You have to allocate (or define) an array of pointers and set all
+of the array entries before using these functions.
+
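+As a minimal sketch (the handler and the probed symbol names here are
+hypothetical, and error handling is abbreviated):
+
+static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	printk("probe hit at %p\n", p->addr);
+	return 0;	/* always let the probed instruction run */
+}
+
+static struct kprobe kp1 = {
+	.symbol_name = "do_fork",
+	.pre_handler = my_pre_handler,
+};
+static struct kprobe kp2 = {
+	.symbol_name = "do_exit",
+	.pre_handler = my_pre_handler,
+};
+static struct kprobe *my_kps[] = { &kp1, &kp2 };
+
+	/* register both probes in one call ... */
+	ret = register_kprobes(my_kps, 2);
+	/* ... and later remove them in one call as well */
+	unregister_kprobes(my_kps, 2);
+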
+4.6 unregister_*probes
+
+#include <linux/kprobes.h>
+void unregister_kprobes(struct kprobe **kps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
+
+Removes each of the num probes in the specified array at once.
+
+NOTE:
+If the functions find some incorrect probes (e.g., unregistered
+probes) in the specified array, they clear the addr field of those
+incorrect probes. However, other probes in the array are
+unregistered correctly.
+
5. Kprobes Features and Limitations
Kprobes allows multiple probes at the same address. Currently,
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 396cdd982c26..a8b430627473 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -450,3 +450,9 @@ These currently include
there are upper and lower limits (32768, 16). Default is 128.
strip_cache_active (currently raid5 only)
number of active entries in the stripe cache
+ preread_bypass_threshold (currently raid5 only)
+ number of times a stripe requiring preread will be bypassed by
+ a stripe that does not require preread. For fairness defaults
+ to 1. Setting this to 0 disables bypass accounting and
+ requires preread stripes to wait until all full-width stripe-
+ writes are complete. Valid values are 0 to stripe_cache_size.
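+ For example, to disable bypass accounting on a hypothetical array md0
+ (md sysfs attributes live under /sys/block/mdX/md/):
+
+ echo 0 > /sys/block/md0/md/preread_bypass_threshold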
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index cf89e8cfd5bf..1d2a772506cf 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -2836,6 +2836,39 @@ platforms are moved over to use the flattened-device-tree model.
big-endian;
};
+ r) Freescale Display Interface Unit
+
+ The Freescale DIU is an LCD controller; with the proper hardware, it can
+ also drive DVI monitors.
+
+ Required properties:
+ - compatible : should be "fsl-diu".
+ - reg : should contain at least address and length of the DIU register
+ set.
+ - interrupts : one DIU interrupt should be described here.
+
+ Example (MPC8610HPCD)
+ display@2c000 {
+ compatible = "fsl,diu";
+ reg = <0x2c000 100>;
+ interrupts = <72 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ s) Freescale on board FPGA
+
+ This is the memory-mapped register set for the on-board FPGA.
+
+ Required properties:
+ - compatible : should be "fsl,fpga-pixis".
+ - reg : should contain the address and the length of the FPGA register
+ set.
+
+ Example (MPC8610HPCD)
+ board-control@e8000000 {
+ compatible = "fsl,fpga-pixis";
+ reg = <0xe8000000 32>;
+ };
VII - Marvell Discovery mv64[345]6x System Controller chips
===========================================================
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
new file mode 100644
index 000000000000..c02a003fa03a
--- /dev/null
+++ b/Documentation/powerpc/kvm_440.txt
@@ -0,0 +1,41 @@
+Hollis Blanchard <hollisb@us.ibm.com>
+15 Apr 2008
+
+Various notes on the implementation of KVM for PowerPC 440:
+
+To enforce isolation, host userspace, guest kernel, and guest userspace all
+run at user privilege level. Only the host kernel runs in supervisor mode.
+Executing privileged instructions in the guest traps into KVM (in the host
+kernel), where we decode and emulate them. Through this technique, unmodified
+440 Linux kernels can be run (slowly) as guests. Future performance work will
+focus on reducing the overhead and frequency of these traps.
+
+The usual code flow starts with userspace invoking a "run" ioctl, which
+causes KVM to switch into guest context. We use IVPR to hijack the host
+interrupt vectors while running the guest, which allows us to direct all
+interrupts to kvmppc_handle_interrupt(). At this point, we could either
+- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
+- let the host interrupt handler run (e.g. when the decrementer fires), or
+- return to host userspace (e.g. when the guest performs device MMIO)
+
+Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
+address space (in host or guest), which gives us virtual address space to use
+for guest mappings. While the guest is running, the host kernel remains mapped
+in AS=0, but the guest can only use AS=1 mappings.
+
+TLB entries: The TLB entries covering the host linear mapping remain
+present while running the guest. This reduces the overhead of lightweight
+exits, which are handled by KVM running in the host kernel. We keep three
+copies of the TLB:
+ - guest TLB: contents of the TLB as the guest sees it
+ - shadow TLB: the TLB that is actually in hardware while guest is running
+ - host TLB: to restore TLB state when context switching guest -> host
+When a TLB miss occurs because a mapping was not present in the shadow TLB,
+but was present in the guest TLB, KVM handles the fault without invoking the
+guest. Large guest pages are backed by multiple 4KB shadow pages through this
+mechanism.
+
+IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
+and block IO, so those drivers must be enabled in the guest. It's possible
+that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
+little effort.
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
new file mode 100644
index 000000000000..6f5ceb0f09fc
--- /dev/null
+++ b/Documentation/s390/kvm.txt
@@ -0,0 +1,125 @@
+*** BIG FAT WARNING ***
+The kvm module is currently in EXPERIMENTAL state for s390. This means that
+the interface to the module is not yet considered to remain stable. Thus, be
+prepared that we keep breaking your userspace application and guest
+compatibility over and over again until we feel happy with the result. Make sure
+your guest kernel, your host kernel, and your userspace launcher are in a
+consistent state.
+
+This document describes the unique ioctl calls to /dev/kvm, the resulting
+kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
+
+1. ioctl calls to /dev/kvm
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_GET_API_VERSION
+KVM_CREATE_VM (*) see note
+KVM_CHECK_EXTENSION
+KVM_GET_VCPU_MMAP_SIZE
+
+Notes:
+* KVM_CREATE_VM may fail on s390 if the calling process has multiple
+threads and has not called KVM_S390_ENABLE_SIE before.
+
+In addition, on s390 the following architecture specific ioctls are supported:
+ioctl: KVM_S390_ENABLE_SIE
+args: none
+see also: include/linux/kvm.h
+This call causes the kernel to switch on PGSTE in the user page table. This
+operation is needed in order to run a virtual machine, and it requires the
+calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
+will implicitly try to switch on PGSTE if the user process has not called
+KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
+before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
+observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
+operation, is not reversible, and will persist over the entire lifetime of
+the calling process. It does not have any user-visible effect other than a small
+performance penalty.
+
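+For example, a single-threaded launcher would typically issue (assuming
+dev_fd is an open file descriptor for /dev/kvm):
+
+	if (ioctl(dev_fd, KVM_S390_ENABLE_SIE, 0) < 0)
+		perror("KVM_S390_ENABLE_SIE");
+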
+2. ioctl calls to the kvm-vm file descriptor
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_CREATE_VCPU
+KVM_SET_USER_MEMORY_REGION (*) see note
+KVM_GET_DIRTY_LOG (**) see note
+
+Notes:
+* kvm allows exactly one memory slot on s390, which has to start
+ at guest absolute address zero and at a user address that is aligned on any
+ page boundary. This hardware "limitation" allows us to have a few unique
+ optimizations. The memory slot doesn't actually have to be filled
+ with memory; it may contain sparse holes. That said, with a different
+ user memory layout this still allows a lot of flexibility when
+ doing the guest memory setup.
+** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
+log. This ioctl call is only needed for guest migration, and we intend to
+implement this one in the future.
+
+In addition, on s390 the following architecture specific ioctls for the kvm-vm
+file descriptor are supported:
+ioctl: KVM_S390_INTERRUPT
+args: struct kvm_s390_interrupt *
+see also: include/linux/kvm.h
+This ioctl is used to submit a floating interrupt for a virtual machine.
+Floating interrupts may be delivered to any virtual cpu in the configuration.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted as floating interrupts. The following interrupts are not considered
+to be useful as floating interrupts, and a call to inject them will result in
+an -EINVAL error code: program interrupts and interprocessor signals. Valid
+floating interrupts are:
+KVM_S390_INT_VIRTIO
+KVM_S390_INT_SERVICE
+
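+For example, a minimal sketch of injecting a floating virtio interrupt
+(assuming vm_fd is a kvm-vm file descriptor obtained via KVM_CREATE_VM):
+
+	struct kvm_s390_interrupt irq = {
+		.type = KVM_S390_INT_VIRTIO,
+		.parm64 = 0,	/* payload; device specific */
+	};
+
+	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)
+		perror("KVM_S390_INTERRUPT");
+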
+3. ioctl calls to the kvm-vcpu file descriptor
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_RUN
+KVM_GET_REGS
+KVM_SET_REGS
+KVM_GET_SREGS
+KVM_SET_SREGS
+KVM_GET_FPU
+KVM_SET_FPU
+
+In addition, on s390 the following architecture specific ioctls for the
+kvm-vcpu file descriptor are supported:
+ioctl: KVM_S390_INTERRUPT
+args: struct kvm_s390_interrupt *
+see also: include/linux/kvm.h
+This ioctl is used to submit an interrupt for a specific virtual cpu.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted for a specific cpu. The following interrupts are not considered
+to be useful, and a call to inject them will result in an -EINVAL error code:
+service processor calls and virtio interrupts. Valid interrupt types are:
+KVM_S390_PROGRAM_INT
+KVM_S390_SIGP_STOP
+KVM_S390_RESTART
+KVM_S390_SIGP_SET_PREFIX
+KVM_S390_INT_EMERGENCY
+
+ioctl: KVM_S390_STORE_STATUS
+args: unsigned long
+see also: include/linux/kvm.h
+This ioctl stores the state of the cpu at the guest real address given as
+argument, unless one of the following values defined in include/linux/kvm.h
+is given as argument:
+KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
+absolute lowcore as defined by the principles of operation
+KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
+its prefix page just like the dump tool that comes with zipl. This is useful
+to create a system dump for use with lkcdutils or crash.
+
+ioctl: KVM_S390_SET_INITIAL_PSW
+args: struct kvm_s390_psw *
+see also: include/linux/kvm.h
+This ioctl can be used to set the processor status word (psw) of a stopped cpu
+prior to running it with KVM_RUN. Note that this call is not required to modify
+the psw during sie intercepts that fall back to userspace: struct kvm_run
+contains the psw, and that value is evaluated on reentry to KVM_RUN
+after the intercept exit was recognized.
+
+ioctl: KVM_S390_INITIAL_RESET
+args: none
+see also: include/linux/kvm.h
+This ioctl can be used to perform an initial cpu reset as defined by the
+principles of operation. The target cpu has to be in stopped state.
diff --git a/Documentation/spi/spidev b/Documentation/spi/spidev
index 5c8e1b988a08..ed2da5e5b28a 100644
--- a/Documentation/spi/spidev
+++ b/Documentation/spi/spidev
@@ -126,8 +126,8 @@ NOTES:
FULL DUPLEX CHARACTER DEVICE API
================================
-See the sample program below for one example showing the use of the full
-duplex programming interface. (Although it doesn't perform a full duplex
+See the spidev_fdx.c sample program for one example showing the use of the
+full duplex programming interface. (Although it doesn't perform a full duplex
transfer.) The model is the same as that used in the kernel spi_sync()
request; the individual transfers offer the same capabilities as are
available to kernel drivers (except that it's not asynchronous).
@@ -141,167 +141,3 @@ and bitrate for each transfer segment.)
To make a full duplex request, provide both rx_buf and tx_buf for the
same transfer. It's even OK if those are the same buffer.
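+
+As an illustrative sketch (not part of spidev_fdx.c; fd is assumed to be
+an open spidev file descriptor), a single segment that clocks four bytes
+out and in at the same time might look like:
+
+	struct spi_ioc_transfer xfer;
+	unsigned char tx[4] = { 0x01, 0x02, 0x03, 0x04 };
+	unsigned char rx[4];
+
+	memset(&xfer, 0, sizeof xfer);
+	xfer.tx_buf = (unsigned long) tx;
+	xfer.rx_buf = (unsigned long) rx;
+	xfer.len = sizeof tx;
+
+	if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0)
+		perror("SPI_IOC_MESSAGE");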
-
-
-SAMPLE PROGRAM
-==============
-
--------------------------------- CUT HERE
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <string.h>
-
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <linux/types.h>
-#include <linux/spi/spidev.h>
-
-
-static int verbose;
-
-static void do_read(int fd, int len)
-{
- unsigned char buf[32], *bp;
- int status;
-
- /* read at least 2 bytes, no more than 32 */
- if (len < 2)
- len = 2;
- else if (len > sizeof(buf))
- len = sizeof(buf);
- memset(buf, 0, sizeof buf);
-
- status = read(fd, buf, len);
- if (status < 0) {
- perror("read");
- return;
- }
- if (status != len) {
- fprintf(stderr, "short read\n");
- return;
- }
-
- printf("read(%2d, %2d): %02x %02x,", len, status,
- buf[0], buf[1]);
- status -= 2;
- bp = buf + 2;
- while (status-- > 0)
- printf(" %02x", *bp++);
- printf("\n");
-}
-
-static void do_msg(int fd, int len)
-{
- struct spi_ioc_transfer xfer[2];
- unsigned char buf[32], *bp;
- int status;
-
- memset(xfer, 0, sizeof xfer);
- memset(buf, 0, sizeof buf);
-
- if (len > sizeof buf)
- len = sizeof buf;
-
- buf[0] = 0xaa;
- xfer[0].tx_buf = (__u64) buf;
- xfer[0].len = 1;
-
- xfer[1].rx_buf = (__u64) buf;
- xfer[1].len = len;
-
- status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
- if (status < 0) {
- perror("SPI_IOC_MESSAGE");
- return;
- }
-
- printf("response(%2d, %2d): ", len, status);
- for (bp = buf; len; len--)
- printf(" %02x", *bp++);
- printf("\n");
-}
-
-static void dumpstat(const char *name, int fd)
-{
- __u8 mode, lsb, bits;
- __u32 speed;
-
- if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) {
- perror("SPI rd_mode");
- return;
- }
- if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) {
- perror("SPI rd_lsb_fist");
- return;
- }
- if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) {
- perror("SPI bits_per_word");
- return;
- }
- if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) {
- perror("SPI max_speed_hz");
- return;
- }
-
- printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n",
- name, mode, bits, lsb ? "(lsb first) " : "", speed);
-}
-
-int main(int argc, char **argv)
-{
- int c;
- int readcount = 0;
- int msglen = 0;
- int fd;
- const char *name;
-
- while ((c = getopt(argc, argv, "hm:r:v")) != EOF) {
- switch (c) {
- case 'm':
- msglen = atoi(optarg);
- if (msglen < 0)
- goto usage;
- continue;
- case 'r':
- readcount = atoi(optarg);
- if (readcount < 0)
- goto usage;
- continue;
- case 'v':
- verbose++;
- continue;
- case 'h':
- case '?':
-usage:
- fprintf(stderr,
- "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n",
- argv[0]);
- return 1;
- }
- }
-
- if ((optind + 1) != argc)
- goto usage;
- name = argv[optind];
-
- fd = open(name, O_RDWR);
- if (fd < 0) {
- perror("open");
- return 1;
- }
-
- dumpstat(name, fd);
-
- if (msglen)
- do_msg(fd, msglen);
-
- if (readcount)
- do_read(fd, readcount);
-
- close(fd);
- return 0;
-}
diff --git a/Documentation/spi/spidev_fdx.c b/Documentation/spi/spidev_fdx.c
new file mode 100644
index 000000000000..fc354f760384
--- /dev/null
+++ b/Documentation/spi/spidev_fdx.c
@@ -0,0 +1,158 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+
+static int verbose;
+
+static void do_read(int fd, int len)
+{
+ unsigned char buf[32], *bp;
+ int status;
+
+ /* read at least 2 bytes, no more than 32 */
+ if (len < 2)
+ len = 2;
+ else if (len > sizeof(buf))
+ len = sizeof(buf);
+ memset(buf, 0, sizeof buf);
+
+ status = read(fd, buf, len);
+ if (status < 0) {
+ perror("read");
+ return;
+ }
+ if (status != len) {
+ fprintf(stderr, "short read\n");
+ return;
+ }
+
+ printf("read(%2d, %2d): %02x %02x,", len, status,
+ buf[0], buf[1]);
+ status -= 2;
+ bp = buf + 2;
+ while (status-- > 0)
+ printf(" %02x", *bp++);
+ printf("\n");
+}
+
+static void do_msg(int fd, int len)
+{
+ struct spi_ioc_transfer xfer[2];
+ unsigned char buf[32], *bp;
+ int status;
+
+ memset(xfer, 0, sizeof xfer);
+ memset(buf, 0, sizeof buf);
+
+ if (len > sizeof buf)
+ len = sizeof buf;
+
+ buf[0] = 0xaa;
+ xfer[0].tx_buf = (__u64) buf;
+ xfer[0].len = 1;
+
+ xfer[1].rx_buf = (__u64) buf;
+ xfer[1].len = len;
+
+ status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
+ if (status < 0) {
+ perror("SPI_IOC_MESSAGE");
+ return;
+ }
+
+ printf("response(%2d, %2d): ", len, status);
+ for (bp = buf; len; len--)
+ printf(" %02x", *bp++);
+ printf("\n");
+}
+
+static void dumpstat(const char *name, int fd)
+{
+ __u8 mode, lsb, bits;
+ __u32 speed;
+
+ if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) {
+ perror("SPI rd_mode");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) {
+ perror("SPI rd_lsb_fist");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) {
+ perror("SPI bits_per_word");
+ return;
+ }
+ if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) {
+ perror("SPI max_speed_hz");
+ return;
+ }
+
+ printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n",
+ name, mode, bits, lsb ? "(lsb first) " : "", speed);
+}
+
+int main(int argc, char **argv)
+{
+ int c;
+ int readcount = 0;
+ int msglen = 0;
+ int fd;
+ const char *name;
+
+ while ((c = getopt(argc, argv, "hm:r:v")) != EOF) {
+ switch (c) {
+ case 'm':
+ msglen = atoi(optarg);
+ if (msglen < 0)
+ goto usage;
+ continue;
+ case 'r':
+ readcount = atoi(optarg);
+ if (readcount < 0)
+ goto usage;
+ continue;
+ case 'v':
+ verbose++;
+ continue;
+ case 'h':
+ case '?':
+usage:
+ fprintf(stderr,
+ "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n",
+ argv[0]);
+ return 1;
+ }
+ }
+
+ if ((optind + 1) != argc)
+ goto usage;
+ name = argv[optind];
+
+ fd = open(name, O_RDWR);
+ if (fd < 0) {
+ perror("open");
+ return 1;
+ }
+
+ dumpstat(name, fd);
+
+ if (msglen)
+ do_msg(fd, msglen);
+
+ if (readcount)
+ do_read(fd, readcount);
+
+ close(fd);
+ return 0;
+}
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index dd4986497996..bad16d3f6a47 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -135,77 +135,58 @@ most general to most specific:
Components of Memory Policies
- A Linux memory policy is a tuple consisting of a "mode" and an optional set
- of nodes. The mode determine the behavior of the policy, while the
- optional set of nodes can be viewed as the arguments to the behavior.
+ A Linux memory policy consists of a "mode", optional mode flags, and an
+ optional set of nodes. The mode determines the behavior of the policy,
+ the optional mode flags determine the behavior of the mode, and the
+ optional set of nodes can be viewed as the arguments to the policy
+ behavior.
Internally, memory policies are implemented by a reference counted
structure, struct mempolicy. Details of this structure will be discussed
in context, below, as required to explain the behavior.
- Note: in some functions AND in the struct mempolicy itself, the mode
- is called "policy". However, to avoid confusion with the policy tuple,
- this document will continue to use the term "mode".
-
Linux memory policy supports the following 4 behavioral modes:
- Default Mode--MPOL_DEFAULT: The behavior specified by this mode is
- context or scope dependent.
-
- As mentioned in the Policy Scope section above, during normal
- system operation, the System Default Policy is hard coded to
- contain the Default mode.
-
- In this context, default mode means "local" allocation--that is
- attempt to allocate the page from the node associated with the cpu
- where the fault occurs. If the "local" node has no memory, or the
- node's memory can be exhausted [no free pages available], local
- allocation will "fallback to"--attempt to allocate pages from--
- "nearby" nodes, in order of increasing "distance".
+ Default Mode--MPOL_DEFAULT: This mode is only used in the memory
+ policy APIs. Internally, MPOL_DEFAULT is converted to the NULL
+ memory policy in all policy scopes. Any existing non-default policy
+ will simply be removed when MPOL_DEFAULT is specified. As a result,
+ MPOL_DEFAULT means "fall back to the next most specific policy scope."
- Implementation detail -- subject to change: "Fallback" uses
- a per node list of sibling nodes--called zonelists--built at
- boot time, or when nodes or memory are added or removed from
- the system [memory hotplug]. These per node zonelist are
- constructed with nodes in order of increasing distance based
- on information provided by the platform firmware.
+ For example, a NULL or default task policy will fall back to the
+ system default policy. A NULL or default vma policy will fall
+ back to the task policy.
- When a task/process policy or a shared policy contains the Default
- mode, this also means "local allocation", as described above.
+ When specified in one of the memory policy APIs, the Default mode
+ does not use the optional set of nodes.
- In the context of a VMA, Default mode means "fall back to task
- policy"--which may or may not specify Default mode. Thus, Default
- mode can not be counted on to mean local allocation when used
- on a non-shared region of the address space. However, see
- MPOL_PREFERRED below.
-
- The Default mode does not use the optional set of nodes.
+ It is an error for the set of nodes specified for this policy to
+ be non-empty.
MPOL_BIND: This mode specifies that memory must come from the
- set of nodes specified by the policy.
-
- The memory policy APIs do not specify an order in which the nodes
- will be searched. However, unlike "local allocation", the Bind
- policy does not consider the distance between the nodes. Rather,
- allocations will fallback to the nodes specified by the policy in
- order of numeric node id. Like everything in Linux, this is subject
- to change.
+ set of nodes specified by the policy. Memory will be allocated from
+ the node in the set with sufficient free memory that is closest to
+ the node where the allocation takes place.
MPOL_PREFERRED: This mode specifies that the allocation should be
attempted from the single node specified in the policy. If that
- allocation fails, the kernel will search other nodes, exactly as
- it would for a local allocation that started at the preferred node
- in increasing distance from the preferred node. "Local" allocation
- policy can be viewed as a Preferred policy that starts at the node
+ allocation fails, the kernel will search other nodes, in order of
+ increasing distance from the preferred node based on information
+ provided by the platform firmware.
containing the cpu where the allocation takes place.
Internally, the Preferred policy uses a single node--the
- preferred_node member of struct mempolicy. A "distinguished
- value of this preferred_node, currently '-1', is interpreted
- as "the node containing the cpu where the allocation takes
- place"--local allocation. This is the way to specify
- local allocation for a specific range of addresses--i.e. for
- VMA policies.
+ preferred_node member of struct mempolicy. When the internal
+ mode flag MPOL_F_LOCAL is set, the preferred_node is ignored and
+ the policy is interpreted as local allocation. "Local" allocation
+ policy can be viewed as a Preferred policy that starts at the node
+ containing the cpu where the allocation takes place.
+
+ It is possible for the user to specify that local allocation is
+ always preferred by passing an empty nodemask with this mode.
+ If an empty nodemask is passed, the policy cannot use the
+ MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES flags described
+ below.
MPOL_INTERLEAVED: This mode specifies that page allocations be
interleaved, on a page granularity, across the nodes specified in
@@ -231,6 +212,154 @@ Components of Memory Policies
the temporary interleaved system default policy works in this
mode.
+ Linux memory policy supports the following optional mode flags:
+
+ MPOL_F_STATIC_NODES: This flag specifies that the nodemask passed by
+ the user should not be remapped if the task or VMA's set of allowed
+ nodes changes after the memory policy has been defined.
+
+ Without this flag, anytime a mempolicy is rebound because of a
+ change in the set of allowed nodes, the node (Preferred) or
+ nodemask (Bind, Interleave) is remapped to the new set of
+ allowed nodes. This may result in nodes being used that were
+ previously undesired.
+
+ With this flag, if the user-specified nodes overlap with the
+ nodes allowed by the task's cpuset, then the memory policy is
+ applied to their intersection. If the two sets of nodes do not
+ overlap, the Default policy is used.
+
+ For example, consider a task that is attached to a cpuset with
+ mems 1-3 that sets an Interleave policy over the same set. If
+ the cpuset's mems change to 3-5, the Interleave will now occur
+ over nodes 3, 4, and 5. With this flag, however, since only node
+ 3 is allowed from the user's nodemask, the "interleave" only
+ occurs over that node. If no nodes from the user's nodemask are
+ now allowed, the Default behavior is used.
+
+ MPOL_F_STATIC_NODES cannot be combined with the
+ MPOL_F_RELATIVE_NODES flag. It also cannot be used for
+ MPOL_PREFERRED policies that were created with an empty nodemask
+ (local allocation).
+
+ MPOL_F_RELATIVE_NODES: This flag specifies that the nodemask passed
+ by the user will be mapped relative to the task's or VMA's set of
+ allowed nodes.  The kernel stores the user-passed nodemask, and if the
+ allowed nodes change, then that original nodemask will
+ be remapped relative to the new set of allowed nodes.
+
+ Without this flag (and without MPOL_F_STATIC_NODES), anytime a
+ mempolicy is rebound because of a change in the set of allowed
+ nodes, the node (Preferred) or nodemask (Bind, Interleave) is
+ remapped to the new set of allowed nodes. That remap may not
+ preserve the relative nature of the user's passed nodemask to its
+ set of allowed nodes upon successive rebinds: a nodemask of
+ 1,3,5 may be remapped to 7-9 and then to 1-3 if the set of
+ allowed nodes is restored to its original state.
+
+ With this flag, the remap is done so that the node numbers from
+ the user's passed nodemask are relative to the set of allowed
+ nodes. In other words, if nodes 0, 2, and 4 are set in the user's
+ nodemask, the policy will be effected over the first (and in the
+ Bind or Interleave case, the third and fifth) nodes in the set of
+ allowed nodes. The nodemask passed by the user represents nodes
+ relative to the task's or VMA's set of allowed nodes.
+
+ If the user's nodemask includes nodes that are outside the range
+ of the new set of allowed nodes (for example, node 5 is set in
+ the user's nodemask when the set of allowed nodes is only 0-3),
+ then the remap wraps around to the beginning of the nodemask and,
+ if not already set, sets the node in the mempolicy nodemask.
+
+ For example, consider a task that is attached to a cpuset with
+ mems 2-5 that sets an Interleave policy over the same set with
+ MPOL_F_RELATIVE_NODES. If the cpuset's mems change to 3-7, the
+ interleave now occurs over nodes 3,5-6. If the cpuset's mems
+ then change to 0,2-3,5, then the interleave occurs over nodes
+ 0,3,5.
+
+ Thanks to the consistent remapping, applications preparing
+ nodemasks to specify memory policies using this flag should
+ disregard their current, actual cpuset imposed memory placement
+ and prepare the nodemask as if they were always located on
+ memory nodes 0 to N-1, where N is the number of memory nodes the
+ policy is intended to manage. Let the kernel then remap to the
+ set of memory nodes allowed by the task's cpuset, as that may
+ change over time.
+
+ MPOL_F_RELATIVE_NODES cannot be combined with the
+ MPOL_F_STATIC_NODES flag. It also cannot be used for
+ MPOL_PREFERRED policies that were created with an empty nodemask
+ (local allocation).
+
+MEMORY POLICY REFERENCE COUNTING
+
+To resolve use/free races, struct mempolicy contains an atomic reference
+count field. Internal interfaces, mpol_get()/mpol_put() increment and
+decrement this reference count, respectively. mpol_put() will only free
+the structure back to the mempolicy kmem cache when the reference count
+goes to zero.
+
+When a new memory policy is allocated, its reference count is initialized
+to '1', representing the reference held by the task that is installing the
+new policy. When a pointer to a memory policy structure is stored in another
+structure, another reference is added, as the task's reference will be dropped
+on completion of the policy installation.
+
+During run-time "usage" of the policy, we attempt to minimize atomic operations
+on the reference count, as this can lead to cache lines bouncing between cpus
+and NUMA nodes. "Usage" here means one of the following:
+
+1) querying of the policy, either by the task itself [using the get_mempolicy()
+ API discussed below] or by another task using the /proc/<pid>/numa_maps
+ interface.
+
+2) examination of the policy to determine the policy mode and associated node
+ or node lists, if any, for page allocation. This is considered a "hot
+ path". Note that for MPOL_BIND, the "usage" extends across the entire
+ allocation process, which may sleep during page reclamation, because the
+ BIND policy nodemask is used, by reference, to filter ineligible nodes.
+
+We can avoid taking an extra reference during the usages listed above as
+follows:
+
+1) we never need to get/free the system default policy as this is never
+ changed nor freed, once the system is up and running.
+
+2) for querying the policy, we do not need to take an extra reference on the
+ target task's task policy nor vma policies because we always acquire the
+ task's mm's mmap_sem for read during the query. The set_mempolicy() and
+ mbind() APIs [see below] always acquire the mmap_sem for write when
+ installing or replacing task or vma policies. Thus, there is no possibility
+ of a task or thread freeing a policy while another task or thread is
+ querying it.
+
+3) Page allocation usage of task or vma policy occurs in the fault path where
+ we hold the mmap_sem for read.  Again, because replacing the task or vma
+ policy requires that the mmap_sem be held for write, the policy can't be
+ freed out from under us while we're using it for page allocation.
+
+4) Shared policies require special consideration. One task can replace a
+ shared memory policy while another task, with a distinct mmap_sem, is
+ querying or allocating a page based on the policy. To resolve this
+ potential race, the shared policy infrastructure adds an extra reference
+ to the shared policy during lookup while holding a spin lock on the shared
+ policy management structure. This requires that we drop this extra
+ reference when we're finished "using" the policy. We must drop the
+ extra reference on shared policies in the same query/allocation paths
+ used for non-shared policies. For this reason, shared policies are marked
+ as such, and the extra reference is dropped "conditionally"--i.e., only
+ for shared policies.
+
+ Because of this extra reference counting, and because we must lookup
+ shared policies in a tree structure under spinlock, shared policies are
+ more expensive to use in the page allocation path. This is especially
+ true for shared policies on shared memory regions shared by tasks running
+ on different NUMA nodes. This extra overhead can be avoided by always
+ falling back to task or system default policy for shared memory regions,
+ or by prefaulting the entire shared memory region into memory and locking
+ it down. However, this might not be appropriate for all applications.
+
MEMORY POLICY APIs
Linux supports 3 system calls for controlling memory policy. These APIS
@@ -251,7 +380,9 @@ Set [Task] Memory Policy:
Sets the calling task's "task/process memory policy" to mode
specified by the 'mode' argument and the set of nodes defined
by 'nmask'. 'nmask' points to a bit mask of node ids containing
- at least 'maxnode' ids.
+ at least 'maxnode' ids. Optional mode flags may be passed by
+ combining the 'mode' argument with the flag (for example:
+ MPOL_INTERLEAVE | MPOL_F_STATIC_NODES).
See the set_mempolicy(2) man page for more details
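+
+	A minimal sketch (assuming <numaif.h> and headers that define the
+	new mode flags; the choice of nodes 0 and 2 is arbitrary):
+
+		unsigned long nodemask = (1UL << 0) | (1UL << 2);
+
+		if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
+				  &nodemask, sizeof(nodemask) * 8))
+			perror("set_mempolicy");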
@@ -303,29 +434,19 @@ MEMORY POLICIES AND CPUSETS
Memory policies work within cpusets as described above. For memory policies
that require a node or set of nodes, the nodes are restricted to the set of
nodes whose memories are allowed by the cpuset constraints. If the nodemask
-specified for the policy contains nodes that are not allowed by the cpuset, or
-the intersection of the set of nodes specified for the policy and the set of
-nodes with memory is the empty set, the policy is considered invalid
-and cannot be installed.
-
-The interaction of memory policies and cpusets can be problematic for a
-couple of reasons:
-
-1) the memory policy APIs take physical node id's as arguments. As mentioned
- above, it is illegal to specify nodes that are not allowed in the cpuset.
- The application must query the allowed nodes using the get_mempolicy()
- API with the MPOL_F_MEMS_ALLOWED flag to determine the allowed nodes and
- restrict itself to those nodes. However, the resources available to a
- cpuset can be changed by the system administrator, or a workload manager
- application, at any time. So, a task may still get errors attempting to
- specify policy nodes, and must query the allowed memories again.
-
-2) when tasks in two cpusets share access to a memory region, such as shared
- memory segments created by shmget() of mmap() with the MAP_ANONYMOUS and
- MAP_SHARED flags, and any of the tasks install shared policy on the region,
- only nodes whose memories are allowed in both cpusets may be used in the
- policies. Obtaining this information requires "stepping outside" the
- memory policy APIs to use the cpuset information and requires that one
- know in what cpusets other task might be attaching to the shared region.
- Furthermore, if the cpusets' allowed memory sets are disjoint, "local"
- allocation is the only valid policy.
+specified for the policy contains nodes that are not allowed by the cpuset and
+MPOL_F_RELATIVE_NODES is not used, the intersection of the set of nodes
+specified for the policy and the set of nodes with memory is used. If the
+result is the empty set, the policy is considered invalid and cannot be
+installed. If MPOL_F_RELATIVE_NODES is used, the policy's nodes are mapped
+onto and folded into the task's set of allowed nodes as previously described.
+
+The interaction of memory policies and cpusets can be problematic when tasks
+in two cpusets share access to a memory region, such as shared memory segments
+created by shmget() or mmap() with the MAP_ANONYMOUS and MAP_SHARED flags, and
+any of the tasks install shared policy on the region: only nodes whose
+memories are allowed in both cpusets may be used in the policies.  Obtaining
+this information requires "stepping outside" the memory policy APIs to use the
+cpuset information and requires that one know in what cpusets other tasks might
+be attaching to the shared region.  Furthermore, if the cpusets' allowed
+memory sets are disjoint, "local" allocation is the only valid policy.
diff --git a/Kbuild b/Kbuild
index 1570d248ad92..32f19c5c9bb0 100644
--- a/Kbuild
+++ b/Kbuild
@@ -1,26 +1,61 @@
#
# Kbuild for top-level directory of the kernel
# This file takes care of the following:
-# 1) Generate asm-offsets.h
-# 2) Check for missing system calls
+# 1) Generate bounds.h
+# 2) Generate asm-offsets.h (may need bounds.h)
+# 3) Check for missing system calls
#####
-# 1) Generate asm-offsets.h
+# 1) Generate bounds.h
+
+bounds-file := include/linux/bounds.h
+
+always := $(bounds-file)
+targets := $(bounds-file) kernel/bounds.s
+
+quiet_cmd_bounds = GEN $@
+define cmd_bounds
+ (set -e; \
+ echo "#ifndef __LINUX_BOUNDS_H__"; \
+ echo "#define __LINUX_BOUNDS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Kbuild"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+kernel/bounds.s: kernel/bounds.c FORCE
+ $(Q)mkdir -p $(dir $@)
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(bounds-file): kernel/bounds.s Kbuild
+ $(Q)mkdir -p $(dir $@)
+ $(call cmd,bounds)
+
+#####
+# 2) Generate asm-offsets.h
#
offsets-file := include/asm-$(SRCARCH)/asm-offsets.h
-always := $(offsets-file)
-targets := $(offsets-file)
+always += $(offsets-file)
+targets += $(offsets-file)
targets += arch/$(SRCARCH)/kernel/asm-offsets.s
-clean-files := $(addprefix $(objtree)/,$(targets))
+
# Default sed regexp - multiline due to syntax constraints
define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+ "/^->/{s:->#\(.*\):/* \1 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:->::; p;}"
endef
-# Override default regexp for specific architectures
-sed-$(CONFIG_MIPS) := "/^@@@/{s/^@@@//; s/ \#.*\$$//; p;}"
quiet_cmd_offsets = GEN $@
define cmd_offsets
@@ -40,7 +75,8 @@ define cmd_offsets
endef
# We use internal kbuild rules to avoid the "is up to date" message from make
-arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c FORCE
+arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
+ $(obj)/$(bounds-file) FORCE
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
@@ -49,7 +85,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
$(call cmd,offsets)
#####
-# 2) Check for missing system calls
+# 3) Check for missing system calls
#
quiet_cmd_syscalls = CALL $<
@@ -58,3 +94,7 @@ quiet_cmd_syscalls = CALL $<
PHONY += missing-syscalls
missing-syscalls: scripts/checksyscalls.sh FORCE
$(call cmd,syscalls)
+
+# Delete all targets during make clean
+clean-files := $(addprefix $(objtree)/,$(targets))
+
diff --git a/MAINTAINERS b/MAINTAINERS
index a942f3852499..c1dd1ae7b133 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2329,6 +2329,13 @@ L: kvm-devel@lists.sourceforge.net
W: kvm.sourceforge.net
S: Supported
+KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
+P: Hollis Blanchard
+M: hollisb@us.ibm.com
+L: kvm-ppc-devel@lists.sourceforge.net
+W: kvm.sourceforge.net
+S: Supported
+
KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64)
P: Anthony Xu
M: anthony.xu@intel.com
@@ -2338,6 +2345,16 @@ L: kvm-ia64-devel@lists.sourceforge.net
W: kvm.sourceforge.net
S: Supported
+KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
+P: Carsten Otte
+M: cotte@de.ibm.com
+P: Christian Borntraeger
+M: borntraeger@de.ibm.com
+M: linux390@de.ibm.com
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+
KEXEC
P: Eric Biederman
M: ebiederm@xmission.com
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index f10d2eddd2c3..b04f1feb1dda 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -994,7 +994,7 @@ marvel_agp_configure(alpha_agp_info *agp)
* rate, but warn the user.
*/
printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
- __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
+ __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
break;
}
@@ -1044,13 +1044,13 @@ marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
if (addr < agp->aperture.bus_base ||
addr >= agp->aperture.bus_base + agp->aperture.size) {
- printk("%s: addr out of range\n", __FUNCTION__);
+ printk("%s: addr out of range\n", __func__);
return -EINVAL;
}
pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
if (!(pte & 1)) {
- printk("%s: pte not valid\n", __FUNCTION__);
+ printk("%s: pte not valid\n", __func__);
return -EINVAL;
}
return (pte >> 1) << PAGE_SHIFT;
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index f5ca5255eb06..c0750291b44a 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -336,10 +336,7 @@ t2_direct_map_window1(unsigned long base, unsigned long length)
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
- __FUNCTION__,
- *(vulp)T2_WBASE1,
- *(vulp)T2_WMASK1,
- *(vulp)T2_TBASE1);
+ __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
#endif
}
@@ -366,10 +363,7 @@ t2_sg_map_window2(struct pci_controller *hose,
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
- __FUNCTION__,
- *(vulp)T2_WBASE2,
- *(vulp)T2_WMASK2,
- *(vulp)T2_TBASE2);
+ __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
}
@@ -377,15 +371,15 @@ static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
- printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
- printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
- printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
- printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
- printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
+ printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */
+ printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2);
+ printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3);
+ printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4);
+ printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE);
- printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__,
+ printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__,
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
- printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__,
+ printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__,
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 819326627b96..319fcb74611e 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -365,21 +365,21 @@ void __init
titan_init_arch(void)
{
#if 0
- printk("%s: titan_init_arch()\n", __FUNCTION__);
- printk("%s: CChip registers:\n", __FUNCTION__);
- printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
- printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
- printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
- printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
- printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
- printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
- printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
- printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);
-
- printk("%s: DChip registers:\n", __FUNCTION__);
- printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
- printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
- printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
+ printk("%s: titan_init_arch()\n", __func__);
+ printk("%s: CChip registers:\n", __func__);
+ printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n", __func__);
+ printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
#endif
boot_cpuid = __hard_smp_processor_id();
@@ -700,13 +700,13 @@ titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
if (addr < agp->aperture.bus_base ||
addr >= agp->aperture.bus_base + agp->aperture.size) {
- printk("%s: addr out of range\n", __FUNCTION__);
+ printk("%s: addr out of range\n", __func__);
return -EINVAL;
}
pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
if (!(pte & 1)) {
- printk("%s: pte not valid\n", __FUNCTION__);
+ printk("%s: pte not valid\n", __func__);
return -EINVAL;
}
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index ef91e09590d4..5e7c28f92f19 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -241,8 +241,6 @@ tsunami_probe_write(volatile unsigned long *vaddr)
#define tsunami_probe_read(ADDR) 1
#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
-#define FN __FUNCTION__
-
static void __init
tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
{
@@ -383,27 +381,27 @@ tsunami_init_arch(void)
/* NXMs just don't matter to Tsunami--unless they make it
choke completely. */
tmp = (unsigned long)(TSUNAMI_cchip - 1);
- printk("%s: probing bogus address: 0x%016lx\n", FN, bogus_addr);
+ printk("%s: probing bogus address: 0x%016lx\n", __func__, bogus_addr);
printk("\tprobe %s\n",
tsunami_probe_write((unsigned long *)bogus_addr)
? "succeeded" : "failed");
#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
#if 0
- printk("%s: CChip registers:\n", FN);
- printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr);
- printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip.mtr.csr);
- printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr);
- printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr);
- printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr);
- printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr);
- printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr);
- printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr);
+ printk("%s: CChip registers:\n", __func__);
+ printk("%s: CSR_CSC 0x%lx\n", __func__, TSUNAMI_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", __func__, TSUNAMI_cchip.mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", __func__, TSUNAMI_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", __func__, TSUNAMI_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", __func__, TSUNAMI_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", __func__, TSUNAMI_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", __func__, TSUNAMI_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", __func__, TSUNAMI_cchip->drir.csr);
printk("%s: DChip registers:\n");
- printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr);
- printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
- printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
+ printk("%s: CSR_DSC 0x%lx\n", __func__, TSUNAMI_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", __func__, TSUNAMI_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", __func__, TSUNAMI_dchip->drev.csr);
#endif
/* With multiple PCI busses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 026ba9af6d6a..ebc3c894b5a2 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -120,6 +120,12 @@ module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
nsyms = symtab->sh_size / sizeof(Elf64_Sym);
chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
+ if (!chains) {
+ printk(KERN_ERR
+ "module %s: no memory for symbol chain buffer\n",
+ me->name);
+ return -ENOMEM;
+ }
got->sh_size = 0;
got->sh_addralign = 8;
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 78357798b6fd..baf57563b14c 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -208,7 +208,7 @@ pdev_save_srm_config(struct pci_dev *dev)
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
- printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
+ printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
return;
}
tmp->next = srm_saved_configs;
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index dd6e334ab9e1..2179c602032a 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -79,25 +79,21 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
#ifdef CONFIG_DISCONTIGMEM
- if (!NODE_DATA(nid) ||
- (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
- sizeof(*arena))))) {
- printk("%s: couldn't allocate arena from node %d\n"
- " falling back to system-wide allocation\n",
- __FUNCTION__, nid);
- arena = alloc_bootmem(sizeof(*arena));
- }
-
- if (!NODE_DATA(nid) ||
- (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
- mem_size,
- align,
- 0)))) {
- printk("%s: couldn't allocate arena ptes from node %d\n"
- " falling back to system-wide allocation\n",
- __FUNCTION__, nid);
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
- }
+ arena = NODE_DATA(nid) ?
+ alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena)) : NULL;
+ if (!arena) {
+ printk("%s: couldn't allocate arena from node %d\n"
+ " falling back to system-wide allocation\n",
+ __func__, nid);
+ arena = alloc_bootmem(sizeof(*arena));
+ }
+
+ arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
+ if (!NODE_DATA(nid) || !arena->ptes) {
+ printk("%s: couldn't allocate arena ptes from node %d\n"
+ " falling back to system-wide allocation\n",
+ __func__, nid);
+ arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ }
#else /* CONFIG_DISCONTIGMEM */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 63c2073401ee..2525692db0ab 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -755,7 +755,7 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
if (atomic_read(&data.unstarted_count) > 0) {
long start_time = jiffies;
printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
- __FUNCTION__);
+ __func__);
timeout = jiffies + 30 * HZ;
while (atomic_read(&data.unstarted_count) > 0
&& time_before(jiffies, timeout))
@@ -764,7 +764,7 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
long delta = jiffies - start_time;
printk(KERN_ERR
"%s: response %ld.%ld seconds into long wait\n",
- __FUNCTION__, delta / HZ,
+ __func__, delta / HZ,
(100 * (delta - ((delta / HZ) * HZ))) / HZ);
}
}
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index f7dd081d57ff..78ad7cd1bbd6 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -199,7 +199,7 @@ srm_env_init(void)
printk(KERN_INFO "%s: This Alpha system doesn't "
"know about SRM (or you've booted "
"SRM->MILO->Linux, which gets "
- "misdetected)...\n", __FUNCTION__);
+ "misdetected)...\n", __func__);
return -ENODEV;
}
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index d187d01d2a17..e53a1e1c2f21 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -259,7 +259,7 @@ alcor_init_pci(void)
if (dev && dev->devfn == PCI_DEVFN(6,0)) {
alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS;
printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
- __FUNCTION__);
+ __func__);
}
pci_dev_put(dev);
}
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 922143ea1cdb..828449cd2636 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -80,7 +80,7 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
if (!(io7 = marvel_find_io7(pid))) {
printk(KERN_ERR
"%s for nonexistent io7 -- vec %x, pid %d\n",
- __FUNCTION__, irq, pid);
+ __func__, irq, pid);
return NULL;
}
@@ -90,7 +90,7 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
if (irq >= 0x180) {
printk(KERN_ERR
"%s for invalid irq -- pid %d adjusted irq %x\n",
- __FUNCTION__, pid, irq);
+ __func__, pid, irq);
return NULL;
}
@@ -110,8 +110,8 @@ io7_enable_irq(unsigned int irq)
ctl = io7_get_irq_ctl(irq, &io7);
if (!ctl || !io7) {
- printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
- __FUNCTION__, irq);
+ printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+ __func__, irq);
return;
}
@@ -130,8 +130,8 @@ io7_disable_irq(unsigned int irq)
ctl = io7_get_irq_ctl(irq, &io7);
if (!ctl || !io7) {
- printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
- __FUNCTION__, irq);
+ printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+ __func__, irq);
return;
}
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 906019cfa681..99a7f19da13a 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -454,7 +454,7 @@ sable_lynx_enable_irq(unsigned int irq)
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
- __FUNCTION__, mask, bit, irq);
+ __func__, mask, bit, irq);
#endif
}
@@ -470,7 +470,7 @@ sable_lynx_disable_irq(unsigned int irq)
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
- __FUNCTION__, mask, bit, irq);
+ __func__, mask, bit, irq);
#endif
}
@@ -524,7 +524,7 @@ sable_lynx_srm_device_interrupt(unsigned long vector)
irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
#if 0
printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
- __FUNCTION__, vector, bit, irq);
+ __func__, vector, bit, irq);
#endif
handle_irq(irq);
}
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index ee7b9009ebb4..d4327e461c22 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -89,7 +89,7 @@ sio_pci_route(void)
/* First, ALWAYS read and print the original setting. */
pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
&orig_route_tab);
- printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__,
+ printk("%s: PIRQ original 0x%x new 0x%x\n", __func__,
orig_route_tab, alpha_mv.sys.sio.route_tab);
#if defined(ALPHA_RESTORE_SRM_SETUP)
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 2dc7f9fed213..dc57790250d2 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -8,6 +8,7 @@
* This file initializes the trap entry points
*/
+#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -770,7 +771,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
unsigned long reg, struct pt_regs *regs)
{
static int cnt = 0;
- static long last_time = 0;
+ static unsigned long last_time;
unsigned long tmp1, tmp2, tmp3, tmp4;
unsigned long fake_reg, *reg_addr = &fake_reg;
@@ -781,7 +782,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
with the unaligned access. */
if (!test_thread_flag (TIF_UAC_NOPRINT)) {
- if (cnt >= 5 && jiffies - last_time > 5*HZ) {
+ if (cnt >= 5 && time_after(jiffies, last_time + 5 * HZ)) {
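+			/* time_after() is jiffies-wrap safe; restart the rate-limit window. */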
cnt = 0;
}
if (++cnt < 5) {
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 37cd547855b1..728bb8f39441 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -539,6 +539,17 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
at91_set_B_periph(AT91_PIN_PB28, 0); /* LCDD23 */
#endif
+ if (ARRAY_SIZE(lcdc_resources) > 2) {
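+		/* Clear the framebuffer SRAM so the panel does not come up
+		 * displaying stale memory contents. */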
+ void __iomem *fb;
+ struct resource *fb_res = &lcdc_resources[2];
+ size_t fb_len = fb_res->end - fb_res->start + 1;
+
+ fb = ioremap_writecombine(fb_res->start, fb_len);
+ if (fb) {
+ memset(fb, 0, fb_len);
+			iounmap(fb);
+ }
+ }
lcdc_data = *data;
platform_device_register(&at91_lcdc_device);
}
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index dbb9a5fc2090..054689804e77 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -381,6 +381,20 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
at91_set_B_periph(AT91_PIN_PC24, 0); /* LCDD22 */
at91_set_B_periph(AT91_PIN_PC25, 0); /* LCDD23 */
+#ifdef CONFIG_FB_INTSRAM
+ {
+ void __iomem *fb;
+ struct resource *fb_res = &lcdc_resources[2];
+ size_t fb_len = fb_res->end - fb_res->start + 1;
+
+ fb = ioremap_writecombine(fb_res->start, fb_len);
+ if (fb) {
+ memset(fb, 0, fb_len);
+			iounmap(fb);
+ }
+ }
+#endif
+
lcdc_data = *data;
platform_device_register(&at91_lcdc_device);
}
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 2687b730e2d0..ce48c14f4349 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -274,6 +274,8 @@ static int __init early_parse_fbmem(char *p)
printk(KERN_WARNING
"Failed to allocate framebuffer memory\n");
fbmem_size = 0;
+ } else {
+ memset(__va(fbmem_start), 0, fbmem_size);
}
}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index 4207a2b52750..5b06ffa15e34 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -27,7 +27,6 @@ show_mem(void)
printk("\nMem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
while (i-- > 0) {
total++;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cd13e138bd03..3aa6c821449a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -19,6 +19,7 @@ config IA64
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_KVM
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -589,6 +590,8 @@ config MSPEC
source "fs/Kconfig"
+source "arch/ia64/kvm/Kconfig"
+
source "lib/Kconfig"
#
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f1645c4f7039..ec4cca477f49 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
+core-$(CONFIG_KVM) += arch/ia64/kvm/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
new file mode 100644
index 000000000000..7914e4828504
--- /dev/null
+++ b/arch/ia64/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+ bool
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ depends on HAVE_KVM || IA64
+ default y
+ ---help---
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+	depends on HAVE_KVM && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ ---help---
+ Support hosting fully virtualized guest machines using hardware
+ virtualization extensions. You will need a fairly recent
+ processor equipped with virtualization extensions. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+config KVM_INTEL
+ tristate "KVM for Intel Itanium 2 processors support"
+ depends on KVM && m
+ ---help---
+ Provides support for KVM on Itanium 2 processors equipped with the VT
+ extensions.
+
+config KVM_TRACE
+ bool
+
+endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
new file mode 100644
index 000000000000..41b034ffa73b
--- /dev/null
+++ b/arch/ia64/kvm/Makefile
@@ -0,0 +1,61 @@
+# This Makefile generates asm-offsets.h and builds the module sources.
+#
+
+# Generate asm-offsets.h for the vmm module build
+offsets-file := asm-offsets.h
+
+always := $(offsets-file)
+targets := $(offsets-file)
+targets += arch/ia64/kvm/asm-offsets.s
+clean-files := $(addprefix $(objtree)/,$(targets) $(obj)/memcpy.S $(obj)/memset.S)
+
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+ "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+
+quiet_cmd_offsets = GEN $@
+define cmd_offsets
+ (set -e; \
+ echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
+ echo "#define __ASM_KVM_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Makefile"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
+# We use internal rules to avoid the "is up to date" message from make
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
+ $(call cmd,offsets)
+
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
+
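+# Symlink the ia64 library memcpy/memset sources into this directory so
+# private copies can be built into the kvm-intel module.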
+$(addprefix $(objtree)/,$(obj)/memcpy.S $(obj)/memset.S):
+ $(shell ln -snf ../lib/memcpy.S $(src)/memcpy.S)
+ $(shell ln -snf ../lib/memset.S $(src)/memset.S)
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+
+kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
+obj-$(CONFIG_KVM) += kvm.o
+
+FORCE : $(obj)/$(offsets-file)
+EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
+ vtlb.o process.o
+# Link in private memcpy/memset so compiler-generated calls (e.g. for
+# structure assignment) resolve inside the relocated VMM.
+kvm-intel-objs += memset.o memcpy.o
+obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
new file mode 100644
index 000000000000..4e3dc13a619c
--- /dev/null
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -0,0 +1,251 @@
+/*
+ * asm-offsets.c Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ *
+ * Anthony Xu <anthony.xu@intel.com>
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ * Copyright (c) 2007 Intel Corporation KVM support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+#define task_struct kvm_vcpu
+
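+/*
+ * Emit "->SYM value" markers into the generated assembly; the sed rule
+ * in the Makefile turns each marker into a #define in asm-offsets.h.
+ */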
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : :)
+
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
+
+void foo(void)
+{
+ DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
+ DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
+
+ BLANK();
+
+ DEFINE(VMM_VCPU_META_RR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
+ DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+ offsetof(struct kvm_vcpu,
+ arch.metaphysical_saved_rr0));
+ DEFINE(VMM_VCPU_VRR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vrr[0]));
+ DEFINE(VMM_VPD_IRR0_OFFSET,
+ offsetof(struct vpd, irr[0]));
+ DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
+ offsetof(struct kvm_vcpu, arch.itc_check));
+ DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
+ offsetof(struct kvm_vcpu, arch.irq_check));
+ DEFINE(VMM_VPD_VHPI_OFFSET,
+ offsetof(struct vpd, vhpi));
+ DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vsa_base));
+ DEFINE(VMM_VCPU_VPD_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vpd));
+ DEFINE(VMM_VCPU_IRQ_CHECK,
+ offsetof(struct kvm_vcpu, arch.irq_check));
+ DEFINE(VMM_VCPU_TIMER_PENDING,
+ offsetof(struct kvm_vcpu, arch.timer_pending));
+ DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
+ DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.mode_flags));
+ DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.itc_offset));
+ DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
+ offsetof(struct kvm_vcpu, arch.last_itc));
+ DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
+ offsetof(struct kvm_vcpu, arch.saved_gp));
+
+ BLANK();
+
+ DEFINE(VMM_PT_REGS_B6_OFFSET,
+ offsetof(struct kvm_pt_regs, b6));
+ DEFINE(VMM_PT_REGS_B7_OFFSET,
+ offsetof(struct kvm_pt_regs, b7));
+ DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_csd));
+ DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_ssd));
+ DEFINE(VMM_PT_REGS_R8_OFFSET,
+ offsetof(struct kvm_pt_regs, r8));
+ DEFINE(VMM_PT_REGS_R9_OFFSET,
+ offsetof(struct kvm_pt_regs, r9));
+ DEFINE(VMM_PT_REGS_R10_OFFSET,
+ offsetof(struct kvm_pt_regs, r10));
+ DEFINE(VMM_PT_REGS_R11_OFFSET,
+ offsetof(struct kvm_pt_regs, r11));
+ DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_ipsr));
+ DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_iip));
+ DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_ifs));
+ DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_unat));
+ DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_pfs));
+ DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_rsc));
+ DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_rnat));
+
+ DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_bspstore));
+ DEFINE(VMM_PT_REGS_PR_OFFSET,
+ offsetof(struct kvm_pt_regs, pr));
+ DEFINE(VMM_PT_REGS_B0_OFFSET,
+ offsetof(struct kvm_pt_regs, b0));
+ DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
+ offsetof(struct kvm_pt_regs, loadrs));
+ DEFINE(VMM_PT_REGS_R1_OFFSET,
+ offsetof(struct kvm_pt_regs, r1));
+ DEFINE(VMM_PT_REGS_R12_OFFSET,
+ offsetof(struct kvm_pt_regs, r12));
+ DEFINE(VMM_PT_REGS_R13_OFFSET,
+ offsetof(struct kvm_pt_regs, r13));
+ DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_fpsr));
+ DEFINE(VMM_PT_REGS_R15_OFFSET,
+ offsetof(struct kvm_pt_regs, r15));
+ DEFINE(VMM_PT_REGS_R14_OFFSET,
+ offsetof(struct kvm_pt_regs, r14));
+ DEFINE(VMM_PT_REGS_R2_OFFSET,
+ offsetof(struct kvm_pt_regs, r2));
+ DEFINE(VMM_PT_REGS_R3_OFFSET,
+ offsetof(struct kvm_pt_regs, r3));
+ DEFINE(VMM_PT_REGS_R16_OFFSET,
+ offsetof(struct kvm_pt_regs, r16));
+ DEFINE(VMM_PT_REGS_R17_OFFSET,
+ offsetof(struct kvm_pt_regs, r17));
+ DEFINE(VMM_PT_REGS_R18_OFFSET,
+ offsetof(struct kvm_pt_regs, r18));
+ DEFINE(VMM_PT_REGS_R19_OFFSET,
+ offsetof(struct kvm_pt_regs, r19));
+ DEFINE(VMM_PT_REGS_R20_OFFSET,
+ offsetof(struct kvm_pt_regs, r20));
+ DEFINE(VMM_PT_REGS_R21_OFFSET,
+ offsetof(struct kvm_pt_regs, r21));
+ DEFINE(VMM_PT_REGS_R22_OFFSET,
+ offsetof(struct kvm_pt_regs, r22));
+ DEFINE(VMM_PT_REGS_R23_OFFSET,
+ offsetof(struct kvm_pt_regs, r23));
+ DEFINE(VMM_PT_REGS_R24_OFFSET,
+ offsetof(struct kvm_pt_regs, r24));
+ DEFINE(VMM_PT_REGS_R25_OFFSET,
+ offsetof(struct kvm_pt_regs, r25));
+ DEFINE(VMM_PT_REGS_R26_OFFSET,
+ offsetof(struct kvm_pt_regs, r26));
+ DEFINE(VMM_PT_REGS_R27_OFFSET,
+ offsetof(struct kvm_pt_regs, r27));
+ DEFINE(VMM_PT_REGS_R28_OFFSET,
+ offsetof(struct kvm_pt_regs, r28));
+ DEFINE(VMM_PT_REGS_R29_OFFSET,
+ offsetof(struct kvm_pt_regs, r29));
+ DEFINE(VMM_PT_REGS_R30_OFFSET,
+ offsetof(struct kvm_pt_regs, r30));
+ DEFINE(VMM_PT_REGS_R31_OFFSET,
+ offsetof(struct kvm_pt_regs, r31));
+ DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_ccv));
+ DEFINE(VMM_PT_REGS_F6_OFFSET,
+ offsetof(struct kvm_pt_regs, f6));
+ DEFINE(VMM_PT_REGS_F7_OFFSET,
+ offsetof(struct kvm_pt_regs, f7));
+ DEFINE(VMM_PT_REGS_F8_OFFSET,
+ offsetof(struct kvm_pt_regs, f8));
+ DEFINE(VMM_PT_REGS_F9_OFFSET,
+ offsetof(struct kvm_pt_regs, f9));
+ DEFINE(VMM_PT_REGS_F10_OFFSET,
+ offsetof(struct kvm_pt_regs, f10));
+ DEFINE(VMM_PT_REGS_F11_OFFSET,
+ offsetof(struct kvm_pt_regs, f11));
+ DEFINE(VMM_PT_REGS_R4_OFFSET,
+ offsetof(struct kvm_pt_regs, r4));
+ DEFINE(VMM_PT_REGS_R5_OFFSET,
+ offsetof(struct kvm_pt_regs, r5));
+ DEFINE(VMM_PT_REGS_R6_OFFSET,
+ offsetof(struct kvm_pt_regs, r6));
+ DEFINE(VMM_PT_REGS_R7_OFFSET,
+ offsetof(struct kvm_pt_regs, r7));
+ DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, eml_unat));
+ DEFINE(VMM_VCPU_IIPA_OFFSET,
+ offsetof(struct kvm_vcpu, arch.cr_iipa));
+ DEFINE(VMM_VCPU_OPCODE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.opcode));
+ DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
+ DEFINE(VMM_VCPU_ISR_OFFSET,
+ offsetof(struct kvm_vcpu, arch.cr_isr));
+ DEFINE(VMM_PT_REGS_R16_SLOT,
+ (((offsetof(struct kvm_pt_regs, r16)
+ - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
+ DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.mode_flags));
+ DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
+ BLANK();
+
+ DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
+ DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
+ DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.insvc[0]));
+ DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
+ DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
+
+ DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
+ DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
+ DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
+ DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
+ DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
+ DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
+ DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
+ DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
+ DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
+ DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
+ DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
+ DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
+ DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
+ DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
+ DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
+ DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
+ DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
+ DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
+ DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
+ DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
+ DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
+ DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
+ DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
+ DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
+ DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
+ DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
+ DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
+ DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
+ DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
+ DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
+ DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
+ DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
+ DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
+ DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
+ BLANK();
+}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
new file mode 100644
index 000000000000..6df073240135
--- /dev/null
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -0,0 +1,1806 @@
+
+/*
+ * kvm_ia64.c: Basic KVM support on Itanium series processors
+ *
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/bitops.h>
+#include <linux/hrtimer.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/gcc_intrin.h>
+#include <asm/pal.h>
+#include <asm/cacheflush.h>
+#include <asm/div64.h>
+#include <asm/tlb.h>
+
+#include "misc.h"
+#include "vti.h"
+#include "iodev.h"
+#include "ioapic.h"
+#include "lapic.h"
+
+static unsigned long kvm_vmm_base;
+static unsigned long kvm_vsa_base;
+static unsigned long kvm_vm_buffer;
+static unsigned long kvm_vm_buffer_size;
+unsigned long kvm_vmm_gp;
+
+static long vp_env_info;
+
+static struct kvm_vmm_info *kvm_vmm_info;
+
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { NULL }
+};
+
+
+struct fdesc{
+ unsigned long ip;
+ unsigned long gp;
+};
+
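+/*
+ * Make freshly copied VMM text coherent with the instruction stream:
+ * flush [start, start+len] in 32-byte strides, then serialize.
+ */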
+static void kvm_flush_icache(unsigned long start, unsigned long len)
+{
+ int l;
+
+ for (l = 0; l < (len + 32); l += 32)
+ ia64_fc(start + l);
+
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+static void kvm_flush_tlb_all(void)
+{
+ unsigned long i, j, count0, count1, stride0, stride1, addr;
+	unsigned long flags;
+
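+	/* Purge the whole local translation cache, walking the PTCE
+	 * base/count/stride geometry reported by PAL. */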
+ addr = local_cpu_data->ptce_base;
+ count0 = local_cpu_data->ptce_count[0];
+ count1 = local_cpu_data->ptce_count[1];
+ stride0 = local_cpu_data->ptce_stride[0];
+ stride1 = local_cpu_data->ptce_stride[1];
+
+ local_irq_save(flags);
+ for (i = 0; i < count0; ++i) {
+ for (j = 0; j < count1; ++j) {
+ ia64_ptce(addr);
+ addr += stride1;
+ }
+ addr += stride0;
+ }
+ local_irq_restore(flags);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
+long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
+ (u64)opt_handler);
+
+ return iprv.status;
+}
+
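+/* Serializes PAL VP environment setup; kvm_vsa_base is written only once. */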
+static DEFINE_SPINLOCK(vp_lock);
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ long status;
+ long tmp_base;
+ unsigned long pte;
+ unsigned long saved_psr;
+ int slot;
+
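+	/* Pin the VMM area with a translation register entry before
+	 * calling PAL to initialize the VP environment. */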
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+ PAGE_KERNEL));
+ local_irq_save(saved_psr);
+	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
+	if (slot < 0)
+		return;
+
+ spin_lock(&vp_lock);
+ status = ia64_pal_vp_init_env(kvm_vsa_base ?
+ VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
+ __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
+	if (status != 0) {
+		spin_unlock(&vp_lock);
+		printk(KERN_WARNING"kvm: Failed to enable VT support!\n");
+		return;
+	}
+
+ if (!kvm_vsa_base) {
+ kvm_vsa_base = tmp_base;
+ printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
+ }
+ spin_unlock(&vp_lock);
+ ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+
+ long status;
+ int slot;
+ unsigned long pte;
+ unsigned long saved_psr;
+ unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
+
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+ PAGE_KERNEL));
+
+ local_irq_save(saved_psr);
+	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
+	if (slot < 0)
+		return;
+
+ status = ia64_pal_vp_exit_env(host_iva);
+ if (status)
+ printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
+ status);
+ ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ *(int *)rtn = 0;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ case KVM_CAP_USER_MEMORY:
+
+ r = 1;
+ break;
+ default:
+ r = 0;
+ }
+ return r;
+
+}
+
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ struct kvm_io_device *dev;
+
+ dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+
+ return dev;
+}
+
+static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 1;
+ return 0;
+}
+
+static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct kvm_mmio_req *p;
+ struct kvm_io_device *mmio_dev;
+
+ p = kvm_get_vcpu_ioreq(vcpu);
+
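+	/* Accesses to the in-kernel ioapic are handled here; all other
+	 * MMIO is forwarded to userspace for emulation. */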
+ if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
+ goto mmio;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
+ vcpu->mmio_size = kvm_run->mmio.len = p->size;
+ vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
+
+ if (vcpu->mmio_is_write)
+ memcpy(vcpu->mmio_data, &p->data, p->size);
+ memcpy(kvm_run->mmio.data, &p->data, p->size);
+ kvm_run->exit_reason = KVM_EXIT_MMIO;
+ return 0;
+mmio:
+ mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+ if (mmio_dev) {
+ if (!p->dir)
+ kvm_iodevice_write(mmio_dev, p->addr, p->size,
+ &p->data);
+ else
+ kvm_iodevice_read(mmio_dev, p->addr, p->size,
+ &p->data);
+
+ } else
+ printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
+ p->state = STATE_IORESP_READY;
+
+ return 1;
+}
+
+static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p->exit_reason == EXIT_REASON_PAL_CALL)
+ return kvm_pal_emul(vcpu, kvm_run);
+ else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 2;
+ return 0;
+ }
+}
+
+static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ kvm_sal_emul(vcpu);
+ return 1;
+ } else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 3;
+ return 0;
+ }
+
+}
+
+/*
+ * dm: delivery mode of the IPI.
+ * vector: vector number to deliver.
+ */
+static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
+ uint64_t vector)
+{
+ switch (dm) {
+ case SAPIC_FIXED:
+ kvm_apic_set_irq(vcpu, vector, 0);
+ break;
+ case SAPIC_NMI:
+ kvm_apic_set_irq(vcpu, 2, 0);
+ break;
+ case SAPIC_EXTINT:
+ kvm_apic_set_irq(vcpu, 0, 0);
+ break;
+ case SAPIC_INIT:
+ case SAPIC_PMI:
+ default:
+ printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
+ break;
+ }
+}
+
+static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
+ unsigned long eid)
+{
+ union ia64_lid lid;
+ int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ if (kvm->vcpus[i]) {
+ lid.val = VCPU_LID(kvm->vcpus[i]);
+ if (lid.id == id && lid.eid == eid)
+ return kvm->vcpus[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+ struct kvm_vcpu *target_vcpu;
+ struct kvm_pt_regs *regs;
+ union ia64_ipi_a addr = p->u.ipi_data.addr;
+ union ia64_ipi_d data = p->u.ipi_data.data;
+
+ target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
+ if (!target_vcpu)
+ return handle_vm_error(vcpu, kvm_run);
+
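+	/* An IPI to a vcpu that has never run starts it at the SAL
+	 * rendezvous entry point; otherwise deliver the interrupt. */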
+ if (!target_vcpu->arch.launched) {
+ regs = vcpu_regs(target_vcpu);
+
+ regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
+ regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
+
+ target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(&target_vcpu->wq))
+ wake_up_interruptible(&target_vcpu->wq);
+ } else {
+ vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
+ if (target_vcpu != vcpu)
+ kvm_vcpu_kick(target_vcpu);
+ }
+
+ return 1;
+}
+
+struct call_data {
+ struct kvm_ptc_g ptc_g_data;
+ struct kvm_vcpu *vcpu;
+};
+
+static void vcpu_global_purge(void *info)
+{
+ struct call_data *p = (struct call_data *)info;
+ struct kvm_vcpu *vcpu = p->vcpu;
+
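+	/* Buffer the ptc.g request for this vcpu; on overflow fall back
+	 * to a full TLB flush, which subsumes the queued purges. */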
+ if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+ return;
+
+ set_bit(KVM_REQ_PTC_G, &vcpu->requests);
+ if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
+ vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
+ p->ptc_g_data;
+ } else {
+ clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
+ vcpu->arch.ptc_g_count = 0;
+ set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+ }
+}
+
+static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+ struct kvm *kvm = vcpu->kvm;
+ struct call_data call_data;
+ int i;
+ call_data.ptc_g_data = p->u.ptc_g_data;
+
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
+ KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == kvm->vcpus[i])
+ continue;
+
+ if (waitqueue_active(&kvm->vcpus[i]->wq))
+ wake_up_interruptible(&kvm->vcpus[i]->wq);
+
+ if (kvm->vcpus[i]->cpu != -1) {
+ call_data.vcpu = kvm->vcpus[i];
+ smp_call_function_single(kvm->vcpus[i]->cpu,
+ vcpu_global_purge, &call_data, 0, 1);
+ } else
+ printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
+
+ }
+ return 1;
+}
+
+static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ return 1;
+}
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+
+ ktime_t kt;
+ long itc_diff;
+ unsigned long vcpu_now_itc;
+
+ unsigned long expires;
+ struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+ unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
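+	/* Sleep until the guest's next timer match (itm): convert the
+	 * remaining ITC cycles into an hrtimer timeout. */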
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
+
+ expires = div64_64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
+ if (irqchip_in_kernel(vcpu->kvm)) {
+ vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ kvm_vcpu_block(vcpu);
+ hrtimer_cancel(p_ht);
+ vcpu->arch.ht_active = 0;
+
+ if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+ return -EINTR;
+ return 1;
+ } else {
+		printk(KERN_ERR"kvm: Unsupported userspace halt!\n");
+ return 0;
+ }
+}
+
+static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+ return 0;
+}
+
+static int handle_external_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ return 1;
+}
+
+static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run) = {
+ [EXIT_REASON_VM_PANIC] = handle_vm_error,
+ [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
+ [EXIT_REASON_PAL_CALL] = handle_pal_call,
+ [EXIT_REASON_SAL_CALL] = handle_sal_call,
+ [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
+ [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
+ [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
+ [EXIT_REASON_IPI] = handle_ipi,
+ [EXIT_REASON_PTC_G] = handle_global_purge,
+
+};
+
+static const int kvm_vti_max_exit_handlers =
+ sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
+
+static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+}
+
+static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p_exit_data;
+
+ p_exit_data = kvm_get_exit_data(vcpu);
+ return p_exit_data->exit_reason;
+}
+
+/*
+ * The guest has exited. See if we can fix it or if we need userspace
+ * assistance.
+ */
+static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ u32 exit_reason = kvm_get_exit_reason(vcpu);
+ vcpu->arch.last_exit = exit_reason;
+
+ if (exit_reason < kvm_vti_max_exit_handlers
+ && kvm_vti_exit_handlers[exit_reason])
+ return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
+ else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = exit_reason;
+ }
+ return 0;
+}
+
+static inline void vti_set_rr6(unsigned long rr6)
+{
+ ia64_set_rr(RR6, rr6);
+ ia64_srlz_i();
+}
+
+static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+ unsigned long pte;
+ struct kvm *kvm = vcpu->kvm;
+ int r;
+
+ /*Insert a pair of tr to map vmm*/
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
+ r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+ if (r < 0)
+ goto out;
+ vcpu->arch.vmm_tr_slot = r;
+	/* Insert a pair of tr entries to map vm data */
+ pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
+ r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
+ pte, KVM_VM_DATA_SHIFT);
+ if (r < 0)
+ goto out;
+ vcpu->arch.vm_tr_slot = r;
+ r = 0;
+out:
+ return r;
+
+}
+
+static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+
+ ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
+ ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
+
+}
+
+static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ if (vcpu->arch.last_run_cpu != cpu ||
+ per_cpu(last_vcpu, cpu) != vcpu) {
+ per_cpu(last_vcpu, cpu) = vcpu;
+ vcpu->arch.last_run_cpu = cpu;
+ kvm_flush_tlb_all();
+ }
+
+ vcpu->arch.host_rr6 = ia64_get_rr(RR6);
+ vti_set_rr6(vcpu->arch.vmm_rr);
+ return kvm_insert_vmm_mapping(vcpu);
+}
+
+static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
+{
+ kvm_purge_vmm_mapping(vcpu);
+ vti_set_rr6(vcpu->arch.host_rr6);
+}
+
+static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ union context *host_ctx, *guest_ctx;
+ int r;
+
+ /*Get host and guest context with guest address space.*/
+ host_ctx = kvm_get_host_context(vcpu);
+ guest_ctx = kvm_get_guest_context(vcpu);
+
+ r = kvm_vcpu_pre_transition(vcpu);
+ if (r < 0)
+ goto out;
+ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+ kvm_vcpu_post_transition(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int r;
+
+again:
+ preempt_disable();
+
+ kvm_prepare_guest_switch(vcpu);
+ local_irq_disable();
+
+ if (signal_pending(current)) {
+ local_irq_enable();
+ preempt_enable();
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ goto out;
+ }
+
+ vcpu->guest_mode = 1;
+ kvm_guest_enter();
+
+ r = vti_vcpu_run(vcpu, kvm_run);
+ if (r < 0) {
+ local_irq_enable();
+ preempt_enable();
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ goto out;
+ }
+
+ vcpu->arch.launched = 1;
+ vcpu->guest_mode = 0;
+ local_irq_enable();
+
+ /*
+ * We must have an instruction between local_irq_enable() and
+ * kvm_guest_exit(), so the timer interrupt isn't delayed by
+ * the interrupt shadow. The stat.exits increment will do nicely.
+ * But we need to prevent reordering, hence this barrier():
+ */
+ barrier();
+
+ kvm_guest_exit();
+
+ preempt_enable();
+
+ r = kvm_handle_exit(kvm_run, vcpu);
+
+ if (r > 0) {
+ if (!need_resched())
+ goto again;
+ }
+
+out:
+ if (r > 0) {
+ kvm_resched(vcpu);
+ goto again;
+ }
+
+ return r;
+}
+
+static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
+
+ if (!vcpu->mmio_is_write)
+ memcpy(&p->data, vcpu->mmio_data, 8);
+ p->state = STATE_IORESP_READY;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int r;
+ sigset_t sigsaved;
+
+ vcpu_load(vcpu);
+
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
+ kvm_vcpu_block(vcpu);
+ vcpu_put(vcpu);
+ return -EAGAIN;
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ kvm_set_mmio_data(vcpu);
+ vcpu->mmio_read_completed = 1;
+ vcpu->mmio_needed = 0;
+ }
+ r = __vcpu_run(vcpu, kvm_run);
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ vcpu_put(vcpu);
+ return r;
+}
+
+/*
+ * Allocate 16M memory for every vm to hold its specific data.
+ * Its memory map is defined in kvm_host.h.
+ */
+static struct kvm *kvm_alloc_kvm(void)
+{
+
+ struct kvm *kvm;
+ uint64_t vm_base;
+
+ vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
+
+ if (!vm_base)
+ return ERR_PTR(-ENOMEM);
+ printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
+
+ /* Zero all pages before use! */
+ memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+
+ kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+ kvm->arch.vm_base = vm_base;
+
+ return kvm;
+}
+
+struct kvm_io_range {
+ unsigned long start;
+ unsigned long size;
+ unsigned long type;
+};
+
+static const struct kvm_io_range io_ranges[] = {
+ {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
+ {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
+ {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
+ {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
+ {PIB_START, PIB_SIZE, GPFN_PIB},
+};
+
+static void kvm_build_io_pmt(struct kvm *kvm)
+{
+ unsigned long i, j;
+
+ /* Mark I/O ranges */
+ for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
+ i++) {
+ for (j = io_ranges[i].start;
+ j < io_ranges[i].start + io_ranges[i].size;
+ j += PAGE_SIZE)
+ kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
+ io_ranges[i].type, 0);
+ }
+
+}
+
+/*Use unused rids to virtualize guest rid.*/
+#define GUEST_PHYSICAL_RR0 0x1739
+#define GUEST_PHYSICAL_RR4 0x2739
+#define VMM_INIT_RR 0x1660
+
+static void kvm_init_vm(struct kvm *kvm)
+{
+ long vm_base;
+
+ BUG_ON(!kvm);
+
+ kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
+ kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
+ kvm->arch.vmm_init_rr = VMM_INIT_RR;
+
+ vm_base = kvm->arch.vm_base;
+ if (vm_base) {
+ kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
+ kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
+ kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
+ }
+
+ /*
+ *Fill P2M entries for MMIO/IO ranges
+ */
+ kvm_build_io_pmt(kvm);
+
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm = kvm_alloc_kvm();
+
+ if (IS_ERR(kvm))
+ return ERR_PTR(-ENOMEM);
+ kvm_init_vm(kvm);
+
+ return kvm;
+
+}
+
+static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
+ struct kvm_irqchip *chip)
+{
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_IOAPIC:
+ memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
+ sizeof(struct kvm_ioapic_state));
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_IOAPIC:
+ memcpy(ioapic_irqchip(kvm),
+ &chip->chip.ioapic,
+ sizeof(struct kvm_ioapic_state));
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+ int r;
+
+ vcpu_load(vcpu);
+
+ for (i = 0; i < 16; i++) {
+ vpd->vgr[i] = regs->vpd.vgr[i];
+ vpd->vbgr[i] = regs->vpd.vbgr[i];
+ }
+ for (i = 0; i < 128; i++)
+ vpd->vcr[i] = regs->vpd.vcr[i];
+ vpd->vhpi = regs->vpd.vhpi;
+ vpd->vnat = regs->vpd.vnat;
+ vpd->vbnat = regs->vpd.vbnat;
+ vpd->vpsr = regs->vpd.vpsr;
+
+ vpd->vpr = regs->vpd.vpr;
+
+	r = -EFAULT;
+	if (copy_from_user(&vcpu->arch.guest, regs->saved_guest,
+				sizeof(union context)))
+		goto out;
+	if (copy_from_user(vcpu + 1, regs->saved_stack +
+				sizeof(struct kvm_vcpu),
+				IA64_STK_OFFSET - sizeof(struct kvm_vcpu)))
+		goto out;
+ vcpu->arch.exit_data =
+ ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+
+ RESTORE_REGS(mp_state);
+ RESTORE_REGS(vmm_rr);
+ memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
+ memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
+ RESTORE_REGS(itr_regions);
+ RESTORE_REGS(dtr_regions);
+ RESTORE_REGS(tc_regions);
+ RESTORE_REGS(irq_check);
+ RESTORE_REGS(itc_check);
+ RESTORE_REGS(timer_check);
+ RESTORE_REGS(timer_pending);
+ RESTORE_REGS(last_itc);
+ for (i = 0; i < 8; i++) {
+ vcpu->arch.vrr[i] = regs->vrr[i];
+ vcpu->arch.ibr[i] = regs->ibr[i];
+ vcpu->arch.dbr[i] = regs->dbr[i];
+ }
+ for (i = 0; i < 4; i++)
+ vcpu->arch.insvc[i] = regs->insvc[i];
+ RESTORE_REGS(xtp);
+ RESTORE_REGS(metaphysical_rr0);
+ RESTORE_REGS(metaphysical_rr4);
+ RESTORE_REGS(metaphysical_saved_rr0);
+ RESTORE_REGS(metaphysical_saved_rr4);
+ RESTORE_REGS(fp_psr);
+ RESTORE_REGS(saved_gp);
+
+ vcpu->arch.irq_new_pending = 1;
+ vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+ set_bit(KVM_REQ_RESUME, &vcpu->requests);
+
+ vcpu_put(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_SET_MEMORY_REGION: {
+ struct kvm_memory_region kvm_mem;
+ struct kvm_userspace_memory_region kvm_userspace_mem;
+
+ r = -EFAULT;
+ if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
+ goto out;
+ kvm_userspace_mem.slot = kvm_mem.slot;
+ kvm_userspace_mem.flags = kvm_mem.flags;
+ kvm_userspace_mem.guest_phys_addr =
+ kvm_mem.guest_phys_addr;
+ kvm_userspace_mem.memory_size = kvm_mem.memory_size;
+ r = kvm_vm_ioctl_set_memory_region(kvm,
+ &kvm_userspace_mem, 0);
+ if (r)
+ goto out;
+ break;
+ }
+ case KVM_CREATE_IRQCHIP:
+		r = kvm_ioapic_init(kvm);
+ if (r)
+ goto out;
+ break;
+ case KVM_IRQ_LINE: {
+ struct kvm_irq_level irq_event;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_event, argp, sizeof irq_event))
+ goto out;
+ if (irqchip_in_kernel(kvm)) {
+ mutex_lock(&kvm->lock);
+ kvm_ioapic_set_irq(kvm->arch.vioapic,
+ irq_event.irq,
+ irq_event.level);
+ mutex_unlock(&kvm->lock);
+ r = 0;
+ }
+ break;
+ }
+ case KVM_GET_IRQCHIP: {
+ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+ struct kvm_irqchip chip;
+
+ r = -EFAULT;
+ if (copy_from_user(&chip, argp, sizeof chip))
+ goto out;
+ r = -ENXIO;
+ if (!irqchip_in_kernel(kvm))
+ goto out;
+ r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &chip, sizeof chip))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_IRQCHIP: {
+ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+ struct kvm_irqchip chip;
+
+ r = -EFAULT;
+ if (copy_from_user(&chip, argp, sizeof chip))
+ goto out;
+ r = -ENXIO;
+ if (!irqchip_in_kernel(kvm))
+ goto out;
+ r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+
+ return -EINVAL;
+}
+
+static int kvm_alloc_vmm_area(void)
+{
+ if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
+ kvm_vmm_base = __get_free_pages(GFP_KERNEL,
+ get_order(KVM_VMM_SIZE));
+ if (!kvm_vmm_base)
+ return -ENOMEM;
+
+ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+ kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
+
+ printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
+ kvm_vmm_base, kvm_vm_buffer);
+ }
+
+ return 0;
+}
+
+static void kvm_free_vmm_area(void)
+{
+ if (kvm_vmm_base) {
+		/* Zero this area before freeing to avoid leaking stale data */
+ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+ free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
+ kvm_vmm_base = 0;
+ kvm_vm_buffer = 0;
+ kvm_vsa_base = 0;
+ }
+}
+
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it. This is a no-op on IA64.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+static int vti_init_vpd(struct kvm_vcpu *vcpu)
+{
+ int i;
+ union cpuid3_t cpuid3;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (IS_ERR(vpd))
+ return PTR_ERR(vpd);
+
+ /* CPUID init */
+ for (i = 0; i < 5; i++)
+ vpd->vcpuid[i] = ia64_get_cpuid(i);
+
+ /* Limit the CPUID number to 5 */
+ cpuid3.value = vpd->vcpuid[3];
+ cpuid3.number = 4; /* 5 - 1 */
+ vpd->vcpuid[3] = cpuid3.value;
+
+ /*Set vac and vdc fields*/
+ vpd->vac.a_from_int_cr = 1;
+ vpd->vac.a_to_int_cr = 1;
+ vpd->vac.a_from_psr = 1;
+ vpd->vac.a_from_cpuid = 1;
+ vpd->vac.a_cover = 1;
+ vpd->vac.a_bsw = 1;
+ vpd->vac.a_int = 1;
+ vpd->vdc.d_vmsw = 1;
+
+ /*Set virtual buffer*/
+ vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
+
+ return 0;
+}
+
+static int vti_create_vp(struct kvm_vcpu *vcpu)
+{
+ long ret;
+ struct vpd *vpd = vcpu->arch.vpd;
+ unsigned long vmm_ivt;
+
+ vmm_ivt = kvm_vmm_info->vmm_ivt;
+
+ printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
+
+ ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
+
+ if (ret) {
+ printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void init_ptce_info(struct kvm_vcpu *vcpu)
+{
+ ia64_ptce_info_t ptce = {0};
+
+ ia64_get_ptce(&ptce);
+ vcpu->arch.ptce_base = ptce.base;
+ vcpu->arch.ptce_count[0] = ptce.count[0];
+ vcpu->arch.ptce_count[1] = ptce.count[1];
+ vcpu->arch.ptce_stride[0] = ptce.stride[0];
+ vcpu->arch.ptce_stride[1] = ptce.stride[1];
+}
+
+static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
+{
+ struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+
+ if (hrtimer_cancel(p_ht))
+ hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
+{
+ struct kvm_vcpu *vcpu;
+ wait_queue_head_t *q;
+
+ vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
+ goto out;
+
+ q = &vcpu->wq;
+ if (waitqueue_active(q)) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ wake_up_interruptible(q);
+ }
+out:
+ vcpu->arch.timer_check = 1;
+ return HRTIMER_NORESTART;
+}
+
+#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *v;
+ int r;
+ int i;
+ long itc_offset;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ union context *p_ctx = &vcpu->arch.guest;
+ struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
+
+ /*Init vcpu context for first run.*/
+ if (IS_ERR(vmm_vcpu))
+ return PTR_ERR(vmm_vcpu);
+
+ if (vcpu->vcpu_id == 0) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+ /*Set entry address for first run.*/
+ regs->cr_iip = PALE_RESET_ENTRY;
+
+		/* Initialize itc offset for vcpus */
+ itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+ for (i = 0; i < MAX_VCPU_NUM; i++) {
+ v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+ v->arch.itc_offset = itc_offset;
+ v->arch.last_itc = 0;
+ }
+ } else
+ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+
+ r = -ENOMEM;
+ vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
+ if (!vcpu->arch.apic)
+ goto out;
+ vcpu->arch.apic->vcpu = vcpu;
+
+ p_ctx->gr[1] = 0;
+ p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+ p_ctx->gr[13] = (unsigned long)vmm_vcpu;
+ p_ctx->psr = 0x1008522000UL;
+ p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
+ p_ctx->caller_unat = 0;
+ p_ctx->pr = 0x0;
+ p_ctx->ar[36] = 0x0; /*unat*/
+ p_ctx->ar[19] = 0x0; /*rnat*/
+ p_ctx->ar[18] = (unsigned long)vmm_vcpu +
+ ((sizeof(struct kvm_vcpu)+15) & ~15);
+ p_ctx->ar[64] = 0x0; /*pfs*/
+ p_ctx->cr[0] = 0x7e04UL;
+ p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
+ p_ctx->cr[8] = 0x3c;
+
+	/* Initialize region registers; rr6 is set up separately for the VMM */
+ p_ctx->rr[0] = 0x30;
+ p_ctx->rr[1] = 0x30;
+ p_ctx->rr[2] = 0x30;
+ p_ctx->rr[3] = 0x30;
+ p_ctx->rr[4] = 0x30;
+ p_ctx->rr[5] = 0x30;
+ p_ctx->rr[7] = 0x30;
+
+	/* Initialize branch register 0 */
+ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
+
+ vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
+ vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
+ vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
+
+ hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vcpu->arch.hlt_timer.function = hlt_timer_fn;
+
+ vcpu->arch.last_run_cpu = -1;
+ vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+ vcpu->arch.vsa_base = kvm_vsa_base;
+ vcpu->arch.__gp = kvm_vmm_gp;
+ vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
+ vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
+ vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+ init_ptce_info(vcpu);
+
+ r = 0;
+out:
+ return r;
+}
+
+static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
+{
+ unsigned long psr;
+ int r;
+
+ local_irq_save(psr);
+ r = kvm_insert_vmm_mapping(vcpu);
+ if (r)
+ goto fail;
+ r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
+ if (r)
+ goto fail;
+
+ r = vti_init_vpd(vcpu);
+ if (r) {
+ printk(KERN_DEBUG"kvm: vpd init error!!\n");
+ goto uninit;
+ }
+
+ r = vti_create_vp(vcpu);
+ if (r)
+ goto uninit;
+
+ kvm_purge_vmm_mapping(vcpu);
+ local_irq_restore(psr);
+
+ return 0;
+uninit:
+ kvm_vcpu_uninit(vcpu);
+fail:
+ return r;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long vm_base = kvm->arch.vm_base;
+ int r;
+ int cpu;
+
+ r = -ENOMEM;
+ if (!vm_base) {
+ printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
+ goto fail;
+ }
+ vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+ vcpu->kvm = kvm;
+
+ cpu = get_cpu();
+ vti_vcpu_load(vcpu, cpu);
+ r = vti_vcpu_setup(vcpu, id);
+ put_cpu();
+
+ if (r) {
+ printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
+ goto fail;
+ }
+
+ return vcpu;
+fail:
+ return ERR_PTR(r);
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -EINVAL;
+}
+
+static void free_kvm(struct kvm *kvm)
+{
+ unsigned long vm_base = kvm->arch.vm_base;
+
+ if (vm_base) {
+ memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+ free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
+ }
+
+}
+
+static void kvm_release_vm_pages(struct kvm *kvm)
+{
+ struct kvm_memory_slot *memslot;
+ int i, j;
+ unsigned long base_gfn;
+
+ for (i = 0; i < kvm->nmemslots; i++) {
+ memslot = &kvm->memslots[i];
+ base_gfn = memslot->base_gfn;
+
+ for (j = 0; j < memslot->npages; j++) {
+ if (memslot->rmap[j])
+ put_page((struct page *)memslot->rmap[j]);
+ }
+ }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kfree(kvm->arch.vioapic);
+ kvm_release_vm_pages(kvm);
+ kvm_free_physmem(kvm);
+ free_kvm(kvm);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ if (cpu != vcpu->cpu) {
+ vcpu->cpu = cpu;
+ if (vcpu->arch.ht_active)
+ kvm_migrate_hlt_timer(vcpu);
+ }
+}
+
+#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+ int r;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+ vcpu_load(vcpu);
+
+ for (i = 0; i < 16; i++) {
+ regs->vpd.vgr[i] = vpd->vgr[i];
+ regs->vpd.vbgr[i] = vpd->vbgr[i];
+ }
+ for (i = 0; i < 128; i++)
+ regs->vpd.vcr[i] = vpd->vcr[i];
+ regs->vpd.vhpi = vpd->vhpi;
+ regs->vpd.vnat = vpd->vnat;
+ regs->vpd.vbnat = vpd->vbnat;
+ regs->vpd.vpsr = vpd->vpsr;
+ regs->vpd.vpr = vpd->vpr;
+
+	r = -EFAULT;
+	if (copy_to_user(regs->saved_guest, &vcpu->arch.guest,
+				sizeof(union context)))
+		goto out;
+	if (copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET))
+		goto out;
+ SAVE_REGS(mp_state);
+ SAVE_REGS(vmm_rr);
+ memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
+ memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
+ SAVE_REGS(itr_regions);
+ SAVE_REGS(dtr_regions);
+ SAVE_REGS(tc_regions);
+ SAVE_REGS(irq_check);
+ SAVE_REGS(itc_check);
+ SAVE_REGS(timer_check);
+ SAVE_REGS(timer_pending);
+ SAVE_REGS(last_itc);
+ for (i = 0; i < 8; i++) {
+ regs->vrr[i] = vcpu->arch.vrr[i];
+ regs->ibr[i] = vcpu->arch.ibr[i];
+ regs->dbr[i] = vcpu->arch.dbr[i];
+ }
+ for (i = 0; i < 4; i++)
+ regs->insvc[i] = vcpu->arch.insvc[i];
+ regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+ SAVE_REGS(xtp);
+ SAVE_REGS(metaphysical_rr0);
+ SAVE_REGS(metaphysical_rr4);
+ SAVE_REGS(metaphysical_saved_rr0);
+ SAVE_REGS(metaphysical_saved_rr4);
+ SAVE_REGS(fp_psr);
+ SAVE_REGS(saved_gp);
+ vcpu_put(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+
+ hrtimer_cancel(&vcpu->arch.hlt_timer);
+ kfree(vcpu->arch.apic);
+}
+
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ unsigned long i;
+ struct page *page;
+ int npages = mem->memory_size >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ unsigned long base_gfn = memslot->base_gfn;
+
+ for (i = 0; i < npages; i++) {
+ page = gfn_to_page(kvm, base_gfn + i);
+ kvm_set_pmt_entry(kvm, base_gfn + i,
+ page_to_pfn(page) << PAGE_SHIFT,
+ _PAGE_AR_RWX|_PAGE_MA_WB);
+ memslot->rmap[i] = (unsigned long)page;
+ }
+
+ return 0;
+}
+
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+}
+
+static int vti_cpu_has_kvm_support(void)
+{
+ long avail = 1, status = 1, control = 1;
+ long ret;
+
+ ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
+ if (ret)
+ goto out;
+
+ if (!(avail & PAL_PROC_VM_BIT))
+ goto out;
+
+ printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
+
+ ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
+ if (ret)
+ goto out;
+ printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
+
+ if (!(vp_env_info & VP_OPCODE)) {
+ printk(KERN_WARNING"kvm: No opcode ability on hardware, "
+ "vm_env_info:0x%lx\n", vp_env_info);
+ }
+
+ return 1;
+out:
+ return 0;
+}
+
+static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
+ struct module *module)
+{
+ unsigned long module_base;
+ unsigned long vmm_size;
+
+ unsigned long vmm_offset, func_offset, fdesc_offset;
+ struct fdesc *p_fdesc;
+
+ BUG_ON(!module);
+
+ if (!kvm_vmm_base) {
+ printk("kvm: kvm area hasn't been initilized yet!!\n");
+ return -EFAULT;
+ }
+
+ /* Calculate the new position of the relocated VMM module. */
+ module_base = (unsigned long)module->module_core;
+ vmm_size = module->core_size;
+ if (unlikely(vmm_size > KVM_VMM_SIZE))
+ return -EFAULT;
+
+ memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+ kvm_flush_icache(kvm_vmm_base, vmm_size);
+
+ /* Recalculate kvm_vmm_info based on the new VMM. */
+ vmm_offset = vmm_info->vmm_ivt - module_base;
+ kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
+ printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
+ kvm_vmm_info->vmm_ivt);
+
+ fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
+ kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
+ fdesc_offset);
+ func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
+ p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+ p_fdesc->ip = KVM_VMM_BASE + func_offset;
+ p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
+
+ printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
+ KVM_VMM_BASE+func_offset);
+
+ fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
+ kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
+ fdesc_offset);
+ func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
+ p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+ p_fdesc->ip = KVM_VMM_BASE + func_offset;
+ p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
+
+ kvm_vmm_gp = p_fdesc->gp;
+
+ printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
+ kvm_vmm_info->vmm_entry);
+ printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
+ KVM_VMM_BASE + func_offset);
+
+ return 0;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ int r;
+ struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
+
+ if (!vti_cpu_has_kvm_support()) {
+ printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (kvm_vmm_info) {
+ printk(KERN_ERR "kvm: Already loaded VMM module!\n");
+ r = -EEXIST;
+ goto out;
+ }
+
+ r = -ENOMEM;
+ kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
+ if (!kvm_vmm_info)
+ goto out;
+
+ if (kvm_alloc_vmm_area())
+ goto out_free0;
+
+ r = kvm_relocate_vmm(vmm_info, vmm_info->module);
+ if (r)
+ goto out_free1;
+
+ return 0;
+
+out_free1:
+ kvm_free_vmm_area();
+out_free0:
+ kfree(kvm_vmm_info);
+out:
+ return r;
+}
+
+void kvm_arch_exit(void)
+{
+ kvm_free_vmm_area();
+ kfree(kvm_vmm_info);
+ kvm_vmm_info = NULL;
+}
+
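+/*
+ * The VMM tracks dirty pages in one global bitmap kept at a fixed
+ * offset inside the per-VM data area. Copy the slot's portion of that
+ * bitmap into the slot's own dirty_bitmap and clear the source, so a
+ * page is only reported dirty once per GET_DIRTY_LOG call.
+ */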
+static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ int r, i;
+ long n, base;
+ unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
+ + KVM_MEM_DIRTY_LOG_OFS);
+
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
+
+ memslot = &kvm->memslots[log->slot];
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ base = memslot->base_gfn / BITS_PER_LONG;
+
+ for (i = 0; i < n/sizeof(long); ++i) {
+ memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+ dirty_bitmap[base + i] = 0;
+ }
+ r = 0;
+out:
+ return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ int r;
+ int n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+ spin_lock(&kvm->arch.dirty_log_lock);
+
+ r = kvm_ia64_sync_dirty_log(kvm, log);
+ if (r)
+ goto out;
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ kvm_flush_remote_tlbs(kvm);
+ memslot = &kvm->memslots[log->slot];
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+out:
+ spin_unlock(&kvm->arch.dirty_log_lock);
+ return r;
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
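+/*
+ * vcpu_kick_intr() is deliberately (almost) empty: the IPI it handles
+ * exists only to force the target physical CPU out of guest mode, so
+ * that pending virtual interrupts are re-evaluated on the next entry.
+ */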
+static void vcpu_kick_intr(void *info)
+{
+#ifdef DEBUG
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
+ printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
+#endif
+}
+
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int ipi_pcpu = vcpu->cpu;
+
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+
+ if (vcpu->guest_mode)
+ smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+}
+
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+{
+
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (!test_and_set_bit(vec, &vpd->irr[0])) {
+ vcpu->arch.irq_new_pending = 1;
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+ kvm_vcpu_kick(vcpu);
+ else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+{
+ return apic->vcpu->vcpu_id == dest;
+}
+
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+{
+ return 0;
+}
+
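+/*
+ * Return the vcpu with the numerically lowest XTP value, i.e. the one
+ * most willing to accept an external interrupt.
+ */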
+struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
+ unsigned long bitmap)
+{
+ struct kvm_vcpu *lvcpu = kvm->vcpus[0];
+ int i;
+
+ for (i = 1; i < KVM_MAX_VCPUS; i++) {
+ if (!kvm->vcpus[i])
+ continue;
+ if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
+ lvcpu = kvm->vcpus[i];
+ }
+
+ return lvcpu;
+}
+
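+/*
+ * Scan a 256-bit vector (eight 32-bit words) from the top word down;
+ * return the index of the highest set bit, or -1 if none is set.
+ */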
+static int find_highest_bits(int *dat)
+{
+ u32 bits, bitnum;
+ int i;
+
+ /* loop for all 256 bits */
+ for (i = 7; i >= 0 ; i--) {
+ bits = dat[i];
+ if (bits) {
+ bitnum = fls(bits);
+ return i * 32 + bitnum - 1;
+ }
+ }
+
+ return -1;
+}
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (vpd->irr[0] & (1UL << NMI_VECTOR))
+ return NMI_VECTOR;
+ if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return find_highest_bits((int *)&vpd->irr[0]);
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ if (kvm_highest_pending_irq(vcpu) != -1)
+ return 1;
+ return 0;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
new file mode 100644
index 000000000000..091f936c4485
--- /dev/null
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -0,0 +1,500 @@
+/*
+ * PAL/SAL call delegation
+ *
+ * Copyright (c) 2004 Li Susie <susie.li@intel.com>
+ * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
+ * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/smp.h>
+
+#include "vti.h"
+#include "misc.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/tlb.h>
+
+/*
+ * Handy macros to make sure that the PAL return values start out
+ * as something meaningful.
+ */
+#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
+ { \
+ x.status = PAL_STATUS_UNIMPLEMENTED; \
+ x.v0 = 0; \
+ x.v1 = 0; \
+ x.v2 = 0; \
+ }
+
+#define INIT_PAL_STATUS_SUCCESS(x) \
+ { \
+ x.status = PAL_STATUS_SUCCESS; \
+ x.v0 = 0; \
+ x.v1 = 0; \
+ x.v2 = 0; \
+ }
+
+static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
+ u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
+ struct exit_ctl_data *p;
+
+ if (vcpu) {
+ p = &vcpu->arch.exit_data;
+ if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+ *gr28 = p->u.pal_data.gr28;
+ *gr29 = p->u.pal_data.gr29;
+ *gr30 = p->u.pal_data.gr30;
+ *gr31 = p->u.pal_data.gr31;
+ return;
+ }
+ }
+ printk(KERN_DEBUG "kvm: failed to get vcpu PAL call data\n");
+}
+
+static void set_pal_result(struct kvm_vcpu *vcpu,
+ struct ia64_pal_retval result) {
+
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p && p->exit_reason == EXIT_REASON_PAL_CALL) {
+ p->u.pal_data.ret = result;
+ return ;
+ }
+ INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
+}
+
+static void set_sal_result(struct kvm_vcpu *vcpu,
+ struct sal_ret_values result) {
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+ p->u.sal_data.ret = result;
+ return;
+ }
+ printk(KERN_WARNING "kvm: failed to set SAL call result\n");
+}
+
+struct cache_flush_args {
+ u64 cache_type;
+ u64 operation;
+ u64 progress;
+ long status;
+};
+
+cpumask_t cpu_cache_coherent_map;
+
+static void remote_pal_cache_flush(void *data)
+{
+ struct cache_flush_args *args = data;
+ long status;
+ u64 progress = args->progress;
+
+ status = ia64_pal_cache_flush(args->cache_type, args->operation,
+ &progress, NULL);
+ if (status != 0)
+ args->status = status;
+}
+
+static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
+{
+ u64 gr28, gr29, gr30, gr31;
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ struct cache_flush_args args = {0, 0, 0, 0};
+ unsigned long psr;
+
+ gr28 = gr29 = gr30 = gr31 = 0;
+ kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
+
+ if (gr31 != 0)
+ printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
+
+ /* Always call Host Pal in int=1 */
+ gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
+ args.cache_type = gr29;
+ args.operation = gr30;
+ smp_call_function(remote_pal_cache_flush,
+ (void *)&args, 1, 1);
+ if (args.status != 0)
+ printk(KERN_ERR"pal_cache_flush error!,"
+ "status:0x%lx\n", args.status);
+ /*
+ * Call host PAL cache flush.
+ * Clear psr.ic when calling PAL_CACHE_FLUSH.
+ */
+ local_irq_save(psr);
+ result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
+ &result.v0);
+ local_irq_restore(psr);
+ if (result.status != 0)
+ printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
+ "in1:%lx,in2:%lx\n",
+ vcpu, result.status, gr29, gr30);
+
+#if 0
+ if (gr29 == PAL_CACHE_TYPE_COHERENT) {
+ cpus_setall(vcpu->arch.cache_coherent_map);
+ cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
+ cpus_setall(cpu_cache_coherent_map);
+ cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
+ }
+#endif
+ return result;
+}
+
+struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
+ return result;
+}
+
+static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
+
+ /*
+ * PAL_FREQ_BASE may not be implemented in some platforms,
+ * call SAL instead.
+ */
+ if (result.v0 == 0) {
+ result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &result.v0,
+ &result.v1);
+ result.v2 = 0;
+ }
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+ return result;
+}
+
+static struct ia64_pal_retval pal_logical_to_physical(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ return result;
+}
+
+static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_SUCCESS(result);
+ return result;
+}
+
+static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ u64 in0, in1, in2, in3;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
+ &result.v2, in2);
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
+{
+
+ pal_cache_config_info_t ci;
+ long status;
+ unsigned long in0, in1, in2, in3, r9, r10;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ status = ia64_pal_cache_config_info(in1, in2, &ci);
+ r9 = ci.pcci_info_1.pcci1_data;
+ r10 = ci.pcci_info_2.pcci2_data;
+ return ((struct ia64_pal_retval){status, r9, r10, 0});
+}
+
+#define GUEST_IMPL_VA_MSB 59
+#define GUEST_RID_BITS 18
+
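+/*
+ * Report VM capabilities to the guest, clamped to what the VMM
+ * virtualizes: at most 8 ITR/DTR entries, an implemented VA MSB of 59
+ * and an 18-bit RID space, presumably reserving the remainder for the
+ * VMM's own use.
+ */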
+static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
+{
+
+ pal_vm_info_1_u_t vminfo1;
+ pal_vm_info_2_u_t vminfo2;
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
+ if (!result.status) {
+ vminfo1.pvi1_val = result.v0;
+ vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
+ vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
+ result.v0 = vminfo1.pvi1_val;
+ vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
+ vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
+ result.v1 = vminfo2.pvi2_val;
+ }
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+
+ return result;
+}
+
+static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
+{
+ u64 index = 0;
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
+ index = p->u.pal_data.gr28;
+
+ return index;
+}
+
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+
+ u64 gr28;
+ struct ia64_pal_retval result;
+ int ret = 1;
+
+ gr28 = kvm_get_pal_call_index(vcpu);
+ /*printk("pal_call index:%lx\n",gr28);*/
+ switch (gr28) {
+ case PAL_CACHE_FLUSH:
+ result = pal_cache_flush(vcpu);
+ break;
+ case PAL_CACHE_SUMMARY:
+ result = pal_cache_summary(vcpu);
+ break;
+ case PAL_HALT_LIGHT:
+ {
+ vcpu->arch.timer_pending = 1;
+ INIT_PAL_STATUS_SUCCESS(result);
+ if (kvm_highest_pending_irq(vcpu) == -1)
+ ret = kvm_emulate_halt(vcpu);
+
+ }
+ break;
+
+ case PAL_FREQ_RATIOS:
+ result = pal_freq_ratios(vcpu);
+ break;
+
+ case PAL_FREQ_BASE:
+ result = pal_freq_base(vcpu);
+ break;
+
+ case PAL_LOGICAL_TO_PHYSICAL:
+ result = pal_logical_to_physical(vcpu);
+ break;
+
+ case PAL_VM_SUMMARY:
+ result = pal_vm_summary(vcpu);
+ break;
+
+ case PAL_VM_INFO:
+ result = pal_vm_info(vcpu);
+ break;
+ case PAL_PLATFORM_ADDR:
+ result = pal_platform_addr(vcpu);
+ break;
+ case PAL_CACHE_INFO:
+ result = pal_cache_info(vcpu);
+ break;
+ case PAL_PTCE_INFO:
+ INIT_PAL_STATUS_SUCCESS(result);
+ result.v1 = (1L << 32) | 1L;
+ break;
+ case PAL_VM_PAGE_SIZE:
+ result.status = ia64_pal_vm_page_size(&result.v0,
+ &result.v1);
+ break;
+ case PAL_RSE_INFO:
+ result.status = ia64_pal_rse_info(&result.v0,
+ (pal_hints_u_t *)&result.v1);
+ break;
+ case PAL_PROC_GET_FEATURES:
+ result = pal_proc_get_features(vcpu);
+ break;
+ case PAL_DEBUG_INFO:
+ result.status = ia64_pal_debug_info(&result.v0,
+ &result.v1);
+ break;
+ case PAL_VERSION:
+ result.status = ia64_pal_version(
+ (pal_version_u_t *)&result.v0,
+ (pal_version_u_t *)&result.v1);
+
+ break;
+ case PAL_FIXED_ADDR:
+ result.status = PAL_STATUS_SUCCESS;
+ result.v0 = vcpu->vcpu_id;
+ break;
+ default:
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ printk(KERN_WARNING"kvm: Unsupported pal call,"
+ " index:0x%lx\n", gr28);
+ }
+ set_pal_result(vcpu, result);
+ return ret;
+}
+
+static struct sal_ret_values sal_emulator(struct kvm *kvm,
+ long index, unsigned long in1,
+ unsigned long in2, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ unsigned long in6, unsigned long in7)
+{
+ unsigned long r9 = 0;
+ unsigned long r10 = 0;
+ long r11 = 0;
+ long status;
+
+ status = 0;
+ switch (index) {
+ case SAL_FREQ_BASE:
+ status = ia64_sal_freq_base(in1, &r9, &r10);
+ break;
+ case SAL_PCI_CONFIG_READ:
+ printk(KERN_WARNING"kvm: Not allowed to call here!"
+ " SAL_PCI_CONFIG_READ\n");
+ break;
+ case SAL_PCI_CONFIG_WRITE:
+ printk(KERN_WARNING"kvm: Not allowed to call here!"
+ " SAL_PCI_CONFIG_WRITE\n");
+ break;
+ case SAL_SET_VECTORS:
+ if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
+ if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
+ status = -2;
+ } else {
+ kvm->arch.rdv_sal_data.boot_ip = in2;
+ kvm->arch.rdv_sal_data.boot_gp = in3;
+ }
+ printk("Rendvous called! iip:%lx\n\n", in2);
+ } else
+ printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
+ "ignored...\n", in1);
+ break;
+ case SAL_GET_STATE_INFO:
+ /* No more info. */
+ status = -5;
+ r9 = 0;
+ break;
+ case SAL_GET_STATE_INFO_SIZE:
+ /* Return a dummy size. */
+ status = 0;
+ r9 = 128;
+ break;
+ case SAL_CLEAR_STATE_INFO:
+ /* Noop. */
+ break;
+ case SAL_MC_RENDEZ:
+ printk(KERN_WARNING
+ "kvm: called SAL_MC_RENDEZ. ignored...\n");
+ break;
+ case SAL_MC_SET_PARAMS:
+ printk(KERN_WARNING
+ "kvm: called SAL_MC_SET_PARAMS.ignored!\n");
+ break;
+ case SAL_CACHE_FLUSH:
+ if (1) {
+ /*
+ * Flush using SAL. This method is faster but
+ * has a side effect on other vcpus running
+ * on this cpu.
+ */
+ status = ia64_sal_cache_flush(in1);
+ } else {
+ /*
+ * A method without this side effect may be
+ * needed.
+ */
+ status = 0;
+ }
+ break;
+ case SAL_CACHE_INIT:
+ printk(KERN_WARNING
+ "kvm: called SAL_CACHE_INIT. ignored...\n");
+ break;
+ case SAL_UPDATE_PAL:
+ printk(KERN_WARNING
+ "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
+ break;
+ default:
+ printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
+ " index:%ld\n", index);
+ status = -1;
+ break;
+ }
+ return ((struct sal_ret_values) {status, r9, r10, r11});
+}
+
+static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
+ u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
+
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p) {
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ *in0 = p->u.sal_data.in0;
+ *in1 = p->u.sal_data.in1;
+ *in2 = p->u.sal_data.in2;
+ *in3 = p->u.sal_data.in3;
+ *in4 = p->u.sal_data.in4;
+ *in5 = p->u.sal_data.in5;
+ *in6 = p->u.sal_data.in6;
+ *in7 = p->u.sal_data.in7;
+ return;
+ }
+ }
+ /* Fall back to zeroed arguments if exit data is unavailable. */
+ *in0 = *in1 = *in2 = *in3 = *in4 = *in5 = *in6 = *in7 = 0;
+}
+
+void kvm_sal_emul(struct kvm_vcpu *vcpu)
+{
+
+ struct sal_ret_values result;
+ u64 index, in1, in2, in3, in4, in5, in6, in7;
+
+ kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
+ &in3, &in4, &in5, &in6, &in7);
+ result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
+ in4, in5, in6, in7);
+ set_sal_result(vcpu, result);
+}
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
new file mode 100644
index 000000000000..13980d9b8bcf
--- /dev/null
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -0,0 +1,273 @@
+/*
+ * kvm_minstate.h: min save macros
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/types.h>
+#include <asm/kregs.h>
+#include "asm-offsets.h"
+
+#define KVM_MINSTATE_START_SAVE_MIN \
+ mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
+ ;; \
+ mov.m r28 = ar.rnat; \
+ addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r22]; \
+ addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
+ mov r23 = ar.bspstore; /* save ar.bspstore */ \
+ ;; \
+ mov ar.bspstore = r22; /* switch to kernel RBS */\
+ ;; \
+ mov r18 = ar.bsp; \
+ mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
+
+
+
+#define KVM_MINSTATE_END_SAVE_MIN \
+ bsw.1; /* switch back to bank 1 (must be last in insn group) */\
+ ;;
+
+
+#define PAL_VSA_SYNC_READ \
+ /* begin to call pal vps sync_read */ \
+ add r25 = VMM_VPD_BASE_OFFSET, r21; \
+ adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
+ ;; \
+ ld8 r25 = [r25]; /* read vpd base */ \
+ ld8 r20 = [r20]; \
+ ;; \
+ add r20 = PAL_VPS_SYNC_READ,r20; \
+ ;; \
+{ .mii; \
+ nop 0x0; \
+ mov r24 = ip; \
+ mov b0 = r20; \
+ ;; \
+}; \
+{ .mmb; \
+ add r24 = 0x20, r24; \
+ nop 0x0; \
+ br.cond.sptk b0; /* call the service */ \
+ ;; \
+};
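+/*
+ * Note: the "add r24 = 0x20, r24" above makes r24 point two 16-byte
+ * bundles past the "mov r24 = ip", i.e. at the instruction following
+ * the br.cond that calls the PAL service, so the service can return
+ * there.
+ */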
+
+
+
+#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
+
+/*
+ * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ * psr.ic: off
+ * r31: contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ * psr.ic: off
+ * r2 = points to &pt_regs.r16
+ * r8 = contents of ar.ccv
+ * r9 = contents of ar.csd
+ * r10 = contents of ar.ssd
+ * r11 = FPSR_DEFAULT
+ * r12 = kernel sp (kernel virtual address)
+ * r13 = points to current task_struct (kernel virtual address)
+ * p15 = TRUE if psr.i is set in cr.ipsr
+ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ * preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro. This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+
+
+#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
+
+#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
+ KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
+ mov r27 = ar.rsc; /* M */ \
+ mov r20 = r1; /* A */ \
+ mov r25 = ar.unat; /* M */ \
+ mov r29 = cr.ipsr; /* M */ \
+ mov r26 = ar.pfs; /* I */ \
+ mov r18 = cr.isr; \
+ COVER; /* B;; (or nothing) */ \
+ ;; \
+ tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
+ mov r1 = r16; \
+/* mov r21=r16; */ \
+ /* switch from user to kernel RBS: */ \
+ ;; \
+ invala; /* M */ \
+ SAVE_IFS; \
+ ;; \
+ KVM_MINSTATE_START_SAVE_MIN \
+ adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
+ adds r16 = PT(CR_IPSR),r1; \
+ ;; \
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
+ st8 [r16] = r29; /* save cr.ipsr */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r17]; \
+ tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
+ mov r29 = b0 \
+ ;; \
+ adds r16 = PT(R8),r1; /* initialize first base pointer */\
+ adds r17 = PT(R9),r1; /* initialize second base pointer */\
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r8,16; \
+.mem.offset 8,0; st8.spill [r17] = r9,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r10,24; \
+.mem.offset 8,0; st8.spill [r17] = r11,24; \
+ ;; \
+ mov r9 = cr.iip; /* M */ \
+ mov r10 = ar.fpsr; /* M */ \
+ ;; \
+ st8 [r16] = r9,16; /* save cr.iip */ \
+ st8 [r17] = r30,16; /* save cr.ifs */ \
+ sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
+ ;; \
+ st8 [r16] = r25,16; /* save ar.unat */ \
+ st8 [r17] = r26,16; /* save ar.pfs */ \
+ shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\
+ ;; \
+ st8 [r16] = r27,16; /* save ar.rsc */ \
+ st8 [r17] = r28,16; /* save ar.rnat */ \
+ ;; /* avoid RAW on r16 & r17 */ \
+ st8 [r16] = r23,16; /* save ar.bspstore */ \
+ st8 [r17] = r31,16; /* save predicates */ \
+ ;; \
+ st8 [r16] = r29,16; /* save b0 */ \
+ st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
+.mem.offset 8,0; st8.spill [r17] = r12,16; \
+ adds r12 = -16,r1; /* switch to kernel memory stack */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r13,16; \
+.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
+ mov r13 = r21; /* establish `current' */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r15,16; \
+.mem.offset 8,0; st8.spill [r17] = r14,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r2,16; \
+.mem.offset 8,0; st8.spill [r17] = r3,16; \
+ adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
+ ;; \
+ adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
+ adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
+ mov r26 = cr.iipa; \
+ mov r27 = cr.isr; \
+ ;; \
+ st8 [r16] = r26; \
+ st8 [r17] = r27; \
+ ;; \
+ EXTRA; \
+ mov r8 = ar.ccv; \
+ mov r9 = ar.csd; \
+ mov r10 = ar.ssd; \
+ movl r11 = FPSR_DEFAULT; /* L-unit */ \
+ adds r17 = VMM_VCPU_GP_OFFSET,r13; \
+ ;; \
+ ld8 r1 = [r17];/* establish kernel global pointer */ \
+ ;; \
+ PAL_VSA_SYNC_READ \
+ KVM_MINSTATE_END_SAVE_MIN
+
+/*
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
+ *
+ * Assumed state upon entry:
+ * psr.ic: on
+ * r2: points to &pt_regs.f6
+ * r3: points to &pt_regs.f7
+ * r8: contents of ar.ccv
+ * r9: contents of ar.csd
+ * r10: contents of ar.ssd
+ * r11: FPSR_DEFAULT
+ *
+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
+ */
+#define KVM_SAVE_REST \
+.mem.offset 0,0; st8.spill [r2] = r16,16; \
+.mem.offset 8,0; st8.spill [r3] = r17,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r18,16; \
+.mem.offset 8,0; st8.spill [r3] = r19,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r20,16; \
+.mem.offset 8,0; st8.spill [r3] = r21,16; \
+ mov r18=b6; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r22,16; \
+.mem.offset 8,0; st8.spill [r3] = r23,16; \
+ mov r19 = b7; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r24,16; \
+.mem.offset 8,0; st8.spill [r3] = r25,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r26,16; \
+.mem.offset 8,0; st8.spill [r3] = r27,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r28,16; \
+.mem.offset 8,0; st8.spill [r3] = r29,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r30,16; \
+.mem.offset 8,0; st8.spill [r3] = r31,32; \
+ ;; \
+ mov ar.fpsr = r11; \
+ st8 [r2] = r8,8; \
+ adds r24 = PT(B6)-PT(F7),r3; \
+ adds r25 = PT(B7)-PT(F7),r3; \
+ ;; \
+ st8 [r24] = r18,16; /* b6 */ \
+ st8 [r25] = r19,16; /* b7 */ \
+ adds r2 = PT(R4)-PT(F6),r2; \
+ adds r3 = PT(R5)-PT(F7),r3; \
+ ;; \
+ st8 [r24] = r9; /* ar.csd */ \
+ st8 [r25] = r10; /* ar.ssd */ \
+ ;; \
+ mov r18 = ar.unat; \
+ adds r19 = PT(EML_UNAT)-PT(R4),r2; \
+ ;; \
+ st8 [r19] = r18; /* eml_unat */ \
+
+
+#define KVM_SAVE_EXTRA \
+.mem.offset 0,0; st8.spill [r2] = r4,16; \
+.mem.offset 8,0; st8.spill [r3] = r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r6,16; \
+.mem.offset 8,0; st8.spill [r3] = r7; \
+ ;; \
+ mov r26 = ar.unat; \
+ ;; \
+ st8 [r2] = r26;/* eml_unat */ \
+
+#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
+#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
+#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
new file mode 100644
index 000000000000..6d6cbcb14893
--- /dev/null
+++ b/arch/ia64/kvm/lapic.h
@@ -0,0 +1,25 @@
+#ifndef __KVM_IA64_LAPIC_H
+#define __KVM_IA64_LAPIC_H
+
+#include <linux/kvm_host.h>
+
+/*
+ * vlsapic
+ */
+struct kvm_lapic{
+ struct kvm_vcpu *vcpu;
+ uint64_t insvc[4];
+ uint64_t vhpi;
+ uint8_t xtp;
+ uint8_t pal_init_pending;
+ uint8_t pad[2];
+};
+
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
+void kvm_free_lapic(struct kvm_vcpu *vcpu);
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+
+#endif
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
new file mode 100644
index 000000000000..e585c4607344
--- /dev/null
+++ b/arch/ia64/kvm/misc.h
@@ -0,0 +1,93 @@
+#ifndef __KVM_IA64_MISC_H
+#define __KVM_IA64_MISC_H
+
+#include <linux/kvm_host.h>
+/*
+ * misc.h
+ * Copyright (C) 2007, Intel Corporation.
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * Return the p2m table's base address on the host side.
+ */
+static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
+{
+ return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
+}
+
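+/*
+ * Install one guest-pfn -> host-physical translation in the p2m table.
+ * Callers pass a page-aligned paddr (page_to_pfn() << PAGE_SHIFT), so
+ * PAGE_ALIGN() below leaves it unchanged.
+ */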
+static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
+ u64 paddr, u64 mem_flags)
+{
+ uint64_t *pmt_base = kvm_host_get_pmt(kvm);
+ unsigned long pte;
+
+ pte = PAGE_ALIGN(paddr) | mem_flags;
+ pmt_base[gfn] = pte;
+}
+
+/*Function for translating host address to guest address*/
+
+static inline void *to_guest(struct kvm *kvm, void *addr)
+{
+ return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
+ KVM_VM_DATA_BASE);
+}
+
+/*Function for translating guest address to host address*/
+
+static inline void *to_host(struct kvm *kvm, void *addr)
+{
+ return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
+ + kvm->arch.vm_base);
+}
+
+/* Get host context of the vcpu */
+static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
+{
+ union context *ctx = &vcpu->arch.host;
+ return to_guest(vcpu->kvm, ctx);
+}
+
+/* Get guest context of the vcpu */
+static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
+{
+ union context *ctx = &vcpu->arch.guest;
+ return to_guest(vcpu->kvm, ctx);
+}
+
+/* Get the vcpu's exit data, filled in by the VMM. */
+static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->arch.exit_data;
+}
+
+/* Get the vcpu's pending MMIO request for the kvm module. */
+static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p_ctl_data;
+
+ if (vcpu) {
+ p_ctl_data = kvm_get_exit_data(vcpu);
+ if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
+ return &p_ctl_data->u.ioreq;
+ }
+
+ return NULL;
+}
+
+#endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
new file mode 100644
index 000000000000..351bf70da463
--- /dev/null
+++ b/arch/ia64/kvm/mmio.c
@@ -0,0 +1,341 @@
+/*
+ * mmio.c: MMIO emulation components.
+ * Copyright (c) 2004, Intel Corporation.
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
+ *
+ * Copyright (c) 2007 Intel Corporation KVM support.
+ * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
+{
+ VLSAPIC_XTP(v) = val;
+}
+
+/*
+ * LSAPIC OFFSET
+ */
+#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
+#define PIB_OFST_INTA 0x1E0000
+#define PIB_OFST_XTP 0x1E0008
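+/*
+ * The low half of the PIB is used for inter-processor interrupt
+ * delivery (8-byte stores only); INTA and XTP live at the fixed
+ * offsets above in the upper half (bit 20 of the offset selects
+ * the half).
+ */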
+
+/*
+ * execute write IPI op.
+ */
+static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
+ uint64_t addr, uint64_t data)
+{
+ struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
+ unsigned long psr;
+
+ local_irq_save(psr);
+
+ p->exit_reason = EXIT_REASON_IPI;
+ p->u.ipi_data.addr.val = addr;
+ p->u.ipi_data.data.val = data;
+ vmm_transition(current_vcpu);
+
+ local_irq_restore(psr);
+
+}
+
+void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
+ unsigned long length, unsigned long val)
+{
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
+ panic_vm(v);
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1) {
+ vlsapic_write_xtp(v, val);
+ } else {
+ /*panic_domain(NULL,
+ "Undefined write on PIB XTP\n");*/
+ panic_vm(v);
+ }
+ break;
+ default:
+ if (PIB_LOW_HALF(addr)) {
+ /*lower half */
+ if (length != 8)
+ /*panic_domain(NULL,
+ "Can't LHF write with size %ld!\n",
+ length);*/
+ panic_vm(v);
+ else
+ vlsapic_write_ipi(v, addr, val);
+ } else {
+ /* upper half: printk("IPI-UHF write %lx\n", addr); */
+ panic_vm(v);
+ }
+ break;
+ }
+}
+
+unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
+ unsigned long length)
+{
+ uint64_t result = 0;
+
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ if (length == 1) /* 1 byte load */
+ ; /* There is no i8259, there is no INTA access*/
+ else
+ /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
+ panic_vm(v);
+
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1) {
+ result = VLSAPIC_XTP(v);
+ /* printk("read xtp %lx\n", result); */
+ } else {
+ /*panic_domain(NULL,
+ "Undefined read on PIB XTP\n");*/
+ panic_vm(v);
+ }
+ break;
+ default:
+ panic_vm(v);
+ break;
+ }
+ return result;
+}
+
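+/*
+ * Forward one guest MMIO access: PIB accesses are emulated in-kernel
+ * via lsapic_read/lsapic_write, everything else is packaged as an
+ * ioreq in exit_data and handed over by vmm_transition(), which is
+ * expected to leave the completed request in the same ioreq.
+ */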
+static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
+ u16 s, int ma, int dir)
+{
+ unsigned long iot;
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ unsigned long psr;
+
+ iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
+
+ local_irq_save(psr);
+
+ /* Intercept accesses to the PIB range */
+ if (iot == GPFN_PIB) {
+ if (!dir)
+ lsapic_write(vcpu, src_pa, s, *dest);
+ else
+ *dest = lsapic_read(vcpu, src_pa, s);
+ goto out;
+ }
+ p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
+ p->u.ioreq.addr = src_pa;
+ p->u.ioreq.size = s;
+ p->u.ioreq.dir = dir;
+ if (dir == IOREQ_WRITE)
+ p->u.ioreq.data = *dest;
+ p->u.ioreq.state = STATE_IOREQ_READY;
+ vmm_transition(vcpu);
+
+ if (p->u.ioreq.state == STATE_IORESP_READY) {
+ if (dir == IOREQ_READ)
+ *dest = p->u.ioreq.data;
+ } else
+ panic_vm(vcpu);
+out:
+ local_irq_restore(psr);
+ return;
+}
+
+/*
+ * dir: 1 = read, 0 = write
+ * inst_type: 0 = integer, 1 = floating point
+ */
+#define SL_INTEGER 0 /* store/load integer */
+#define SL_FLOATING 1 /* store/load floating point */
+
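+/*
+ * Decode the faulting load/store bundle by hand and replay it as an
+ * MMIO access: pick the slot indicated by ipsr.ri, classify the M-unit
+ * instruction, apply any base-register update it encodes, then forward
+ * the access through mmio_access().
+ */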
+void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
+{
+ struct kvm_pt_regs *regs;
+ IA64_BUNDLE bundle;
+ int slot, dir = 0;
+ int inst_type = -1;
+ u16 size = 0;
+ u64 data, slot1a, slot1b, temp, update_reg;
+ s32 imm;
+ INST64 inst;
+
+ regs = vcpu_regs(vcpu);
+
+ if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
+ /* if the instruction fetch fails, return and retry */
+ return;
+ }
+ slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
+ if (!slot)
+ inst.inst = bundle.slot0;
+ else if (slot == 1) {
+ slot1a = bundle.slot1a;
+ slot1b = bundle.slot1b;
+ inst.inst = slot1a + (slot1b << 18);
+ } else if (slot == 2)
+ inst.inst = bundle.slot2;
+
+ /* Integer Load/Store */
+ if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
+ inst_type = SL_INTEGER;
+ size = (inst.M1.x6 & 0x3);
+ if ((inst.M1.x6 >> 2) > 0xb) {
+ /*write*/
+ dir = IOREQ_WRITE;
+ data = vcpu_get_gr(vcpu, inst.M4.r2);
+ } else if ((inst.M1.x6 >> 2) < 0xb) {
+ /*read*/
+ dir = IOREQ_READ;
+ }
+ } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
+ /* Integer Load + Reg update */
+ inst_type = SL_INTEGER;
+ dir = IOREQ_READ;
+ size = (inst.M2.x6 & 0x3);
+ temp = vcpu_get_gr(vcpu, inst.M2.r3);
+ update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
+ temp += update_reg;
+ vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
+ } else if (inst.M3.major == 5) {
+ /*Integer Load/Store + Imm update*/
+ inst_type = SL_INTEGER;
+ size = (inst.M3.x6&0x3);
+ if ((inst.M5.x6 >> 2) > 0xb) {
+ /*write*/
+ dir = IOREQ_WRITE;
+ data = vcpu_get_gr(vcpu, inst.M5.r2);
+ temp = vcpu_get_gr(vcpu, inst.M5.r3);
+ imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
+ (inst.M5.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
+
+ } else if ((inst.M3.x6 >> 2) < 0xb) {
+ /*read*/
+ dir = IOREQ_READ;
+ temp = vcpu_get_gr(vcpu, inst.M3.r3);
+ imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
+ (inst.M3.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
+
+ }
+ } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
+ && inst.M9.m == 0 && inst.M9.x == 0) {
+ /* Floating-point spill*/
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
+ /* Write high word. FIXME: this is a kludge! */
+ v.u.bits[1] &= 0x3ffff;
+ mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ data = v.u.bits[0];
+ size = 3;
+ } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
+ /* Floating-point spill + Imm update */
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+ temp = vcpu_get_gr(vcpu, inst.M10.r3);
+ imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+ (inst.M10.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+
+ /* Write high word. FIXME: this is a kludge! */
+ v.u.bits[1] &= 0x3ffff;
+ mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ data = v.u.bits[0];
+ size = 3;
+ } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
+ /* Floating-point stf8 + Imm update */
+ struct ia64_fpreg v;
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ size = 3;
+ vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+ data = v.u.bits[0]; /* Significand. */
+ temp = vcpu_get_gr(vcpu, inst.M10.r3);
+ imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+ (inst.M10.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+ } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
+ && inst.M15.x6 <= 0x2f) {
+ temp = vcpu_get_gr(vcpu, inst.M15.r3);
+ imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
+ (inst.M15.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
+
+ vcpu_increment_iip(vcpu);
+ return;
+ } else if (inst.M12.major == 6 && inst.M12.m == 1
+ && inst.M12.x == 1 && inst.M12.x6 == 1) {
+ /* Floating-point Load Pair + Imm ldfp8 M12*/
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_READ;
+ size = 8; /*ldfd*/
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ v.u.bits[0] = data;
+ v.u.bits[1] = 0x1003E;
+ vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
+ padr += 8;
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ v.u.bits[0] = data;
+ v.u.bits[1] = 0x1003E;
+ vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
+ padr += 8;
+ vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
+ vcpu_increment_iip(vcpu);
+ return;
+ } else {
+ inst_type = -1;
+ panic_vm(vcpu);
+ }
+
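+ /* The decoded size is log2 of the byte count; convert it here. */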
+ size = 1 << size;
+ if (dir == IOREQ_WRITE) {
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ } else {
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ if (inst_type == SL_INTEGER)
+ vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
+ else
+ panic_vm(vcpu);
+
+ }
+ vcpu_increment_iip(vcpu);
+}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
new file mode 100644
index 000000000000..e4f15d641b22
--- /dev/null
+++ b/arch/ia64/kvm/optvfault.S
@@ -0,0 +1,918 @@
+/*
+ * arch/ia64/kvm/optvfault.S
+ * optimized virtualization fault handlers
+ *
+ * Copyright (C) 2006 Intel Co
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#include "vti.h"
+#include "asm-offsets.h"
+
+#define ACCE_MOV_FROM_AR
+#define ACCE_MOV_FROM_RR
+#define ACCE_MOV_TO_RR
+#define ACCE_RSM
+#define ACCE_SSM
+#define ACCE_MOV_TO_PSR
+#define ACCE_THASH
+
+//mov r1=ar3
+GLOBAL_ENTRY(kvm_asm_mov_from_ar)
+#ifndef ACCE_MOV_FROM_AR
+ br.many kvm_virtualization_fault_back
+#endif
+ add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+ add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+ extr.u r17=r25,6,7
+ ;;
+ ld8 r18=[r18]
+ mov r19=ar.itc
+ mov r24=b0
+ ;;
+ add r19=r19,r18
+ addl r20=@gprel(asm_mov_to_reg),gp
+ ;;
+ st8 [r16] = r19
+ adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+ shladd r17=r17,4,r20
+ ;;
+ mov b0=r17
+ br.sptk.few b0
+ ;;
+END(kvm_asm_mov_from_ar)
+
+
+// mov r1=rr[r3]
+GLOBAL_ENTRY(kvm_asm_mov_from_rr)
+#ifndef ACCE_MOV_FROM_RR
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r16=r25,20,7
+ extr.u r17=r25,6,7
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
+ shladd r16=r16,4,r20
+ mov r24=b0
+ ;;
+ add r27=VMM_VCPU_VRR0_OFFSET,r21
+ mov b0=r16
+ br.many b0
+ ;;
+kvm_asm_mov_from_rr_back_1:
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
+ shr.u r26=r19,61
+ ;;
+ shladd r17=r17,4,r22
+ shladd r27=r26,3,r27
+ ;;
+ ld8 r19=[r27]
+ mov b0=r17
+ br.many b0
+END(kvm_asm_mov_from_rr)
+
+
+// mov rr[r3]=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_rr)
+#ifndef ACCE_MOV_TO_RR
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r16=r25,20,7
+ extr.u r17=r25,13,7
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
+ shladd r16=r16,4,r20
+ mov r22=b0
+ ;;
+ add r27=VMM_VCPU_VRR0_OFFSET,r21
+ mov b0=r16
+ br.many b0
+ ;;
+kvm_asm_mov_to_rr_back_1:
+ adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
+ shr.u r23=r19,61
+ shladd r17=r17,4,r20
+ ;;
+ //if rr6, go back
+ cmp.eq p6,p0=6,r23
+ mov b0=r22
+ (p6) br.cond.dpnt.many kvm_virtualization_fault_back
+ ;;
+ mov r28=r19
+ mov b0=r17
+ br.many b0
+kvm_asm_mov_to_rr_back_2:
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ shladd r27=r23,3,r27
+ ;; // vrr.rid<<4 |0xe
+ st8 [r27]=r19
+ mov b0=r30
+ ;;
+ extr.u r16=r19,8,26
+ extr.u r18 =r19,2,6
+ mov r17 =0xe
+ ;;
+ shladd r16 = r16, 4, r17
+ extr.u r19 =r19,0,8
+ ;;
+ shl r16 = r16,8
+ ;;
+ add r19 = r19, r16
+ ;; //set ve 1
+ dep r19=-1,r19,0,1
+ cmp.lt p6,p0=14,r18
+ ;;
+ (p6) mov r18=14
+ ;;
+ (p6) dep r19=r18,r19,2,6
+ ;;
+ cmp.eq p6,p0=0,r23
+ ;;
+ cmp.eq.or p6,p0=4,r23
+ ;;
+ adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ ;;
+ ld4 r16=[r16]
+ cmp.eq p7,p0=r0,r0
+ (p6) shladd r17=r23,1,r17
+ ;;
+ (p6) st8 [r17]=r19
+ (p6) tbit.nz p6,p7=r16,0
+ ;;
+ (p7) mov rr[r28]=r19
+ mov r24=r22
+ br.many b0
+END(kvm_asm_mov_to_rr)
+
+
+//rsm
+GLOBAL_ENTRY(kvm_asm_rsm)
+#ifndef ACCE_RSM
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,6,21
+ extr.u r27=r25,31,2
+ ;;
+ ld8 r16=[r16]
+ extr.u r28=r25,36,1
+ dep r26=r27,r26,21,2
+ ;;
+ add r17=VPD_VPSR_START_OFFSET,r16
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ //r26 is imm24
+ dep r26=r28,r26,23,1
+ ;;
+ ld8 r18=[r17]
+ movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
+ ld4 r23=[r22]
+ sub r27=-1,r26
+ mov r24=b0
+ ;;
+ mov r20=cr.ipsr
+ or r28=r27,r28
+ and r19=r18,r27
+ ;;
+ st8 [r17]=r19
+ and r20=r20,r28
+ /* Commented out due to lack of fp lazy algorithm support
+ adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+ ;;
+ ld8 r27=[r27]
+ ;;
+ tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
+ ;;
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ */
+ ;;
+ mov cr.ipsr=r20
+ tbit.nz p6,p0=r23,0
+ ;;
+ tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
+ (p6) br.dptk kvm_resume_to_guest
+ ;;
+ add r26=VMM_VCPU_META_RR0_OFFSET,r21
+ add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+ dep r23=-1,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ br.many kvm_resume_to_guest
+END(kvm_asm_rsm)
+
+
+//ssm
+GLOBAL_ENTRY(kvm_asm_ssm)
+#ifndef ACCE_SSM
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,6,21
+ extr.u r27=r25,31,2
+ ;;
+ ld8 r16=[r16]
+ extr.u r28=r25,36,1
+ dep r26=r27,r26,21,2
+ ;; //r26 is imm24
+ add r27=VPD_VPSR_START_OFFSET,r16
+ dep r26=r28,r26,23,1
+ ;; //r19 vpsr
+ ld8 r29=[r27]
+ mov r24=b0
+ ;;
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ mov r20=cr.ipsr
+ or r19=r29,r26
+ ;;
+ ld4 r23=[r22]
+ st8 [r27]=r19
+ or r20=r20,r26
+ ;;
+ mov cr.ipsr=r20
+ movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+ ;;
+ and r19=r28,r19
+ tbit.z p6,p0=r23,0
+ ;;
+ cmp.ne.or p6,p0=r28,r19
+ (p6) br.dptk kvm_asm_ssm_1
+ ;;
+ add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+ dep r23=0,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ ;;
+kvm_asm_ssm_1:
+ tbit.nz p6,p0=r29,IA64_PSR_I_BIT
+ ;;
+ tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
+ (p6) br.dptk kvm_resume_to_guest
+ ;;
+ add r29=VPD_VTPR_START_OFFSET,r16
+ add r30=VPD_VHPI_START_OFFSET,r16
+ ;;
+ ld8 r29=[r29]
+ ld8 r30=[r30]
+ ;;
+ extr.u r17=r29,4,4
+ extr.u r18=r29,16,1
+ ;;
+ dep r17=r18,r17,4,1
+ ;;
+ cmp.gt p6,p0=r30,r17
+ (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+ br.many kvm_resume_to_guest
+END(kvm_asm_ssm)
+
+
+//mov psr.l=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_psr)
+#ifndef ACCE_MOV_TO_PSR
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,13,7 //r2
+ ;;
+ ld8 r16=[r16]
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
+ shladd r26=r26,4,r20
+ mov r24=b0
+ ;;
+ add r27=VPD_VPSR_START_OFFSET,r16
+ mov b0=r26
+ br.many b0
+ ;;
+kvm_asm_mov_to_psr_back:
+ ld8 r17=[r27]
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ dep r19=0,r19,32,32
+ ;;
+ ld4 r23=[r22]
+ dep r18=0,r17,0,32
+ ;;
+ add r30=r18,r19
+ movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+ ;;
+ st8 [r27]=r30
+ and r27=r28,r30
+ and r29=r28,r17
+ ;;
+ cmp.eq p5,p0=r29,r27
+ cmp.eq p6,p7=r28,r27
+ (p5) br.many kvm_asm_mov_to_psr_1
+ ;;
+ //virtual to physical
+ (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
+ (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+ (p7) dep r23=-1,r23,0,1
+ ;;
+ //physical to virtual
+ (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+ (p6) dep r23=0,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ ;;
+kvm_asm_mov_to_psr_1:
+ mov r20=cr.ipsr
+ movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
+ ;;
+ or r19=r19,r28
+ dep r20=0,r20,0,32
+ ;;
+ add r20=r19,r20
+ mov b0=r24
+ ;;
+ /* Commented out due to lack of fp lazy algorithm support
+ adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+ ;;
+ ld8 r27=[r27]
+ ;;
+ tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
+ ;;
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ ;;
+ */
+ mov cr.ipsr=r20
+ cmp.ne p6,p0=r0,r0
+ ;;
+ tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
+ tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
+ (p6) br.dpnt.few kvm_resume_to_guest
+ ;;
+ add r29=VPD_VTPR_START_OFFSET,r16
+ add r30=VPD_VHPI_START_OFFSET,r16
+ ;;
+ ld8 r29=[r29]
+ ld8 r30=[r30]
+ ;;
+ extr.u r17=r29,4,4
+ extr.u r18=r29,16,1
+ ;;
+ dep r17=r18,r17,4,1
+ ;;
+ cmp.gt p6,p0=r30,r17
+ (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+ br.many kvm_resume_to_guest
+END(kvm_asm_mov_to_psr)
+
+
+ENTRY(kvm_asm_dispatch_vexirq)
+//increment iip
+ mov r16=cr.ipsr
+ ;;
+ extr.u r17=r16,IA64_PSR_RI_BIT,2
+ tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+ ;;
+ (p6) mov r18=cr.iip
+ (p6) mov r17=r0
+ (p7) add r17=1,r17
+ ;;
+ (p6) add r18=0x10,r18
+ dep r16=r17,r16,IA64_PSR_RI_BIT,2
+ ;;
+ (p6) mov cr.iip=r18
+ mov cr.ipsr=r16
+ mov r30 =1
+ br.many kvm_dispatch_vexirq
+END(kvm_asm_dispatch_vexirq)
+
+// thash
+// TODO: add support when pta.vf = 1
+GLOBAL_ENTRY(kvm_asm_thash)
+#ifndef ACCE_THASH
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r17=r25,20,7 // get r3 from opcode in r25
+ extr.u r18=r25,6,7 // get r1 from opcode in r25
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
+ shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
+ adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
+ ;;
+ mov r24=b0
+ ;;
+ ld8 r16=[r16] // get VPD addr
+ mov b0=r17
+ br.many b0 // r19 return value
+ ;;
+kvm_asm_thash_back1:
+ shr.u r23=r19,61 // get RR number
+ adds r25=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
+ adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
+ ;;
+ shladd r27=r23,3,r25 // get vcpu->arch.vrr[r23]'s addr
+ ld8 r17=[r16] // get PTA
+ mov r26=1
+ ;;
+ extr.u r29=r17,2,6 // get pta.size
+ ld8 r25=[r27] // get vcpu->arch.vrr[r23]'s value
+ ;;
+ extr.u r25=r25,2,6 // get rr.ps
+ shl r22=r26,r29 // 1UL << pta.size
+ ;;
+ shr.u r23=r19,r25 // vaddr >> rr.ps
+ adds r26=3,r29 // pta.size + 3
+ shl r27=r17,3 // pta << 3
+ ;;
+ shl r23=r23,3 // (vaddr >> rr.ps) << 3
+ shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
+ movl r16=7<<61
+ ;;
+ adds r22=-1,r22 // (1UL << pta.size) - 1
+ shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
+ and r19=r19,r16 // vaddr & VRN_MASK
+ ;;
+ and r22=r22,r23 // vhpt_offset
+ or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
+ adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
+ ;;
+ or r19=r19,r22 // calc pval
+ shladd r17=r18,4,r26
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ ;;
+ mov b0=r17
+ br.many b0
+END(kvm_asm_thash)
+
+#define MOV_TO_REG0 \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ ;; \
+};
+
+
+#define MOV_TO_REG(n) \
+{; \
+ mov r##n##=r19; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+};
+
+
+#define MOV_FROM_REG(n) \
+{; \
+ mov r19=r##n##; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+};
+
+
+#define MOV_TO_BANK0_REG(n) \
+ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
+{; \
+ mov r26=r2; \
+ mov r2=r19; \
+ bsw.1; \
+ ;; \
+}; \
+{; \
+ mov r##n##=r2; \
+ nop.b 0x0; \
+ bsw.0; \
+ ;; \
+}; \
+{; \
+ mov r2=r26; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+}; \
+END(asm_mov_to_bank0_reg##n##)
+
+
+#define MOV_FROM_BANK0_REG(n) \
+ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
+{; \
+ mov r26=r2; \
+ nop.b 0x0; \
+ bsw.1; \
+ ;; \
+}; \
+{; \
+ mov r2=r##n##; \
+ nop.b 0x0; \
+ bsw.0; \
+ ;; \
+}; \
+{; \
+ mov r19=r2; \
+ mov r2=r26; \
+ mov b0=r30; \
+}; \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many b0; \
+ ;; \
+}; \
+END(asm_mov_from_bank0_reg##n##)
+
+
+#define JMP_TO_MOV_TO_BANK0_REG(n) \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many asm_mov_to_bank0_reg##n##; \
+ ;; \
+}
+
+
+#define JMP_TO_MOV_FROM_BANK0_REG(n) \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many asm_mov_from_bank0_reg##n##; \
+ ;; \
+}
+
+
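+/*
+ * Each entry in the asm_mov_from_reg/asm_mov_to_reg tables below is a
+ * single 16-byte bundle, which is why callers compute the target with
+ * shladd (index << 4). Bank-0 registers r16-r31 need a bank switch
+ * that does not fit in one bundle, so their slots branch out to the
+ * MOV_*_BANK0_REG stubs defined separately.
+ */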
+MOV_FROM_BANK0_REG(16)
+MOV_FROM_BANK0_REG(17)
+MOV_FROM_BANK0_REG(18)
+MOV_FROM_BANK0_REG(19)
+MOV_FROM_BANK0_REG(20)
+MOV_FROM_BANK0_REG(21)
+MOV_FROM_BANK0_REG(22)
+MOV_FROM_BANK0_REG(23)
+MOV_FROM_BANK0_REG(24)
+MOV_FROM_BANK0_REG(25)
+MOV_FROM_BANK0_REG(26)
+MOV_FROM_BANK0_REG(27)
+MOV_FROM_BANK0_REG(28)
+MOV_FROM_BANK0_REG(29)
+MOV_FROM_BANK0_REG(30)
+MOV_FROM_BANK0_REG(31)
+
+
+// mov from reg table
+ENTRY(asm_mov_from_reg)
+ MOV_FROM_REG(0)
+ MOV_FROM_REG(1)
+ MOV_FROM_REG(2)
+ MOV_FROM_REG(3)
+ MOV_FROM_REG(4)
+ MOV_FROM_REG(5)
+ MOV_FROM_REG(6)
+ MOV_FROM_REG(7)
+ MOV_FROM_REG(8)
+ MOV_FROM_REG(9)
+ MOV_FROM_REG(10)
+ MOV_FROM_REG(11)
+ MOV_FROM_REG(12)
+ MOV_FROM_REG(13)
+ MOV_FROM_REG(14)
+ MOV_FROM_REG(15)
+ JMP_TO_MOV_FROM_BANK0_REG(16)
+ JMP_TO_MOV_FROM_BANK0_REG(17)
+ JMP_TO_MOV_FROM_BANK0_REG(18)
+ JMP_TO_MOV_FROM_BANK0_REG(19)
+ JMP_TO_MOV_FROM_BANK0_REG(20)
+ JMP_TO_MOV_FROM_BANK0_REG(21)
+ JMP_TO_MOV_FROM_BANK0_REG(22)
+ JMP_TO_MOV_FROM_BANK0_REG(23)
+ JMP_TO_MOV_FROM_BANK0_REG(24)
+ JMP_TO_MOV_FROM_BANK0_REG(25)
+ JMP_TO_MOV_FROM_BANK0_REG(26)
+ JMP_TO_MOV_FROM_BANK0_REG(27)
+ JMP_TO_MOV_FROM_BANK0_REG(28)
+ JMP_TO_MOV_FROM_BANK0_REG(29)
+ JMP_TO_MOV_FROM_BANK0_REG(30)
+ JMP_TO_MOV_FROM_BANK0_REG(31)
+ MOV_FROM_REG(32)
+ MOV_FROM_REG(33)
+ MOV_FROM_REG(34)
+ MOV_FROM_REG(35)
+ MOV_FROM_REG(36)
+ MOV_FROM_REG(37)
+ MOV_FROM_REG(38)
+ MOV_FROM_REG(39)
+ MOV_FROM_REG(40)
+ MOV_FROM_REG(41)
+ MOV_FROM_REG(42)
+ MOV_FROM_REG(43)
+ MOV_FROM_REG(44)
+ MOV_FROM_REG(45)
+ MOV_FROM_REG(46)
+ MOV_FROM_REG(47)
+ MOV_FROM_REG(48)
+ MOV_FROM_REG(49)
+ MOV_FROM_REG(50)
+ MOV_FROM_REG(51)
+ MOV_FROM_REG(52)
+ MOV_FROM_REG(53)
+ MOV_FROM_REG(54)
+ MOV_FROM_REG(55)
+ MOV_FROM_REG(56)
+ MOV_FROM_REG(57)
+ MOV_FROM_REG(58)
+ MOV_FROM_REG(59)
+ MOV_FROM_REG(60)
+ MOV_FROM_REG(61)
+ MOV_FROM_REG(62)
+ MOV_FROM_REG(63)
+ MOV_FROM_REG(64)
+ MOV_FROM_REG(65)
+ MOV_FROM_REG(66)
+ MOV_FROM_REG(67)
+ MOV_FROM_REG(68)
+ MOV_FROM_REG(69)
+ MOV_FROM_REG(70)
+ MOV_FROM_REG(71)
+ MOV_FROM_REG(72)
+ MOV_FROM_REG(73)
+ MOV_FROM_REG(74)
+ MOV_FROM_REG(75)
+ MOV_FROM_REG(76)
+ MOV_FROM_REG(77)
+ MOV_FROM_REG(78)
+ MOV_FROM_REG(79)
+ MOV_FROM_REG(80)
+ MOV_FROM_REG(81)
+ MOV_FROM_REG(82)
+ MOV_FROM_REG(83)
+ MOV_FROM_REG(84)
+ MOV_FROM_REG(85)
+ MOV_FROM_REG(86)
+ MOV_FROM_REG(87)
+ MOV_FROM_REG(88)
+ MOV_FROM_REG(89)
+ MOV_FROM_REG(90)
+ MOV_FROM_REG(91)
+ MOV_FROM_REG(92)
+ MOV_FROM_REG(93)
+ MOV_FROM_REG(94)
+ MOV_FROM_REG(95)
+ MOV_FROM_REG(96)
+ MOV_FROM_REG(97)
+ MOV_FROM_REG(98)
+ MOV_FROM_REG(99)
+ MOV_FROM_REG(100)
+ MOV_FROM_REG(101)
+ MOV_FROM_REG(102)
+ MOV_FROM_REG(103)
+ MOV_FROM_REG(104)
+ MOV_FROM_REG(105)
+ MOV_FROM_REG(106)
+ MOV_FROM_REG(107)
+ MOV_FROM_REG(108)
+ MOV_FROM_REG(109)
+ MOV_FROM_REG(110)
+ MOV_FROM_REG(111)
+ MOV_FROM_REG(112)
+ MOV_FROM_REG(113)
+ MOV_FROM_REG(114)
+ MOV_FROM_REG(115)
+ MOV_FROM_REG(116)
+ MOV_FROM_REG(117)
+ MOV_FROM_REG(118)
+ MOV_FROM_REG(119)
+ MOV_FROM_REG(120)
+ MOV_FROM_REG(121)
+ MOV_FROM_REG(122)
+ MOV_FROM_REG(123)
+ MOV_FROM_REG(124)
+ MOV_FROM_REG(125)
+ MOV_FROM_REG(126)
+ MOV_FROM_REG(127)
+END(asm_mov_from_reg)
+
+
+/* must be in bank 0
+ * parameter:
+ * r31: pr
+ * r24: b0
+ */
+ENTRY(kvm_resume_to_guest)
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ ld8 r1 =[r16]
+ adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
+ ;;
+ mov r16=cr.ipsr
+ ;;
+ ld8 r20 = [r20]
+ adds r19=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r25=[r19]
+ extr.u r17=r16,IA64_PSR_RI_BIT,2
+ tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+ ;;
+ (p6) mov r18=cr.iip
+ (p6) mov r17=r0
+ ;;
+ (p6) add r18=0x10,r18
+ (p7) add r17=1,r17
+ ;;
+ (p6) mov cr.iip=r18
+ dep r16=r17,r16,IA64_PSR_RI_BIT,2
+ ;;
+ mov cr.ipsr=r16
+ adds r19= VPD_VPSR_START_OFFSET,r25
+ add r28=PAL_VPS_RESUME_NORMAL,r20
+ add r29=PAL_VPS_RESUME_HANDLER,r20
+ ;;
+ ld8 r19=[r19]
+ mov b0=r29
+ cmp.ne p6,p7 = r0,r0
+ ;;
+ tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
+ ;;
+ (p6) ld8 r26=[r25]
+ (p7) mov b0=r28
+ mov pr=r31,-2
+ br.sptk.many b0 // call pal service
+ ;;
+END(kvm_resume_to_guest)
+
+
+MOV_TO_BANK0_REG(16)
+MOV_TO_BANK0_REG(17)
+MOV_TO_BANK0_REG(18)
+MOV_TO_BANK0_REG(19)
+MOV_TO_BANK0_REG(20)
+MOV_TO_BANK0_REG(21)
+MOV_TO_BANK0_REG(22)
+MOV_TO_BANK0_REG(23)
+MOV_TO_BANK0_REG(24)
+MOV_TO_BANK0_REG(25)
+MOV_TO_BANK0_REG(26)
+MOV_TO_BANK0_REG(27)
+MOV_TO_BANK0_REG(28)
+MOV_TO_BANK0_REG(29)
+MOV_TO_BANK0_REG(30)
+MOV_TO_BANK0_REG(31)
+
+
+// mov to reg table
+ENTRY(asm_mov_to_reg)
+ MOV_TO_REG0
+ MOV_TO_REG(1)
+ MOV_TO_REG(2)
+ MOV_TO_REG(3)
+ MOV_TO_REG(4)
+ MOV_TO_REG(5)
+ MOV_TO_REG(6)
+ MOV_TO_REG(7)
+ MOV_TO_REG(8)
+ MOV_TO_REG(9)
+ MOV_TO_REG(10)
+ MOV_TO_REG(11)
+ MOV_TO_REG(12)
+ MOV_TO_REG(13)
+ MOV_TO_REG(14)
+ MOV_TO_REG(15)
+ JMP_TO_MOV_TO_BANK0_REG(16)
+ JMP_TO_MOV_TO_BANK0_REG(17)
+ JMP_TO_MOV_TO_BANK0_REG(18)
+ JMP_TO_MOV_TO_BANK0_REG(19)
+ JMP_TO_MOV_TO_BANK0_REG(20)
+ JMP_TO_MOV_TO_BANK0_REG(21)
+ JMP_TO_MOV_TO_BANK0_REG(22)
+ JMP_TO_MOV_TO_BANK0_REG(23)
+ JMP_TO_MOV_TO_BANK0_REG(24)
+ JMP_TO_MOV_TO_BANK0_REG(25)
+ JMP_TO_MOV_TO_BANK0_REG(26)
+ JMP_TO_MOV_TO_BANK0_REG(27)
+ JMP_TO_MOV_TO_BANK0_REG(28)
+ JMP_TO_MOV_TO_BANK0_REG(29)
+ JMP_TO_MOV_TO_BANK0_REG(30)
+ JMP_TO_MOV_TO_BANK0_REG(31)
+ MOV_TO_REG(32)
+ MOV_TO_REG(33)
+ MOV_TO_REG(34)
+ MOV_TO_REG(35)
+ MOV_TO_REG(36)
+ MOV_TO_REG(37)
+ MOV_TO_REG(38)
+ MOV_TO_REG(39)
+ MOV_TO_REG(40)
+ MOV_TO_REG(41)
+ MOV_TO_REG(42)
+ MOV_TO_REG(43)
+ MOV_TO_REG(44)
+ MOV_TO_REG(45)
+ MOV_TO_REG(46)
+ MOV_TO_REG(47)
+ MOV_TO_REG(48)
+ MOV_TO_REG(49)
+ MOV_TO_REG(50)
+ MOV_TO_REG(51)
+ MOV_TO_REG(52)
+ MOV_TO_REG(53)
+ MOV_TO_REG(54)
+ MOV_TO_REG(55)
+ MOV_TO_REG(56)
+ MOV_TO_REG(57)
+ MOV_TO_REG(58)
+ MOV_TO_REG(59)
+ MOV_TO_REG(60)
+ MOV_TO_REG(61)
+ MOV_TO_REG(62)
+ MOV_TO_REG(63)
+ MOV_TO_REG(64)
+ MOV_TO_REG(65)
+ MOV_TO_REG(66)
+ MOV_TO_REG(67)
+ MOV_TO_REG(68)
+ MOV_TO_REG(69)
+ MOV_TO_REG(70)
+ MOV_TO_REG(71)
+ MOV_TO_REG(72)
+ MOV_TO_REG(73)
+ MOV_TO_REG(74)
+ MOV_TO_REG(75)
+ MOV_TO_REG(76)
+ MOV_TO_REG(77)
+ MOV_TO_REG(78)
+ MOV_TO_REG(79)
+ MOV_TO_REG(80)
+ MOV_TO_REG(81)
+ MOV_TO_REG(82)
+ MOV_TO_REG(83)
+ MOV_TO_REG(84)
+ MOV_TO_REG(85)
+ MOV_TO_REG(86)
+ MOV_TO_REG(87)
+ MOV_TO_REG(88)
+ MOV_TO_REG(89)
+ MOV_TO_REG(90)
+ MOV_TO_REG(91)
+ MOV_TO_REG(92)
+ MOV_TO_REG(93)
+ MOV_TO_REG(94)
+ MOV_TO_REG(95)
+ MOV_TO_REG(96)
+ MOV_TO_REG(97)
+ MOV_TO_REG(98)
+ MOV_TO_REG(99)
+ MOV_TO_REG(100)
+ MOV_TO_REG(101)
+ MOV_TO_REG(102)
+ MOV_TO_REG(103)
+ MOV_TO_REG(104)
+ MOV_TO_REG(105)
+ MOV_TO_REG(106)
+ MOV_TO_REG(107)
+ MOV_TO_REG(108)
+ MOV_TO_REG(109)
+ MOV_TO_REG(110)
+ MOV_TO_REG(111)
+ MOV_TO_REG(112)
+ MOV_TO_REG(113)
+ MOV_TO_REG(114)
+ MOV_TO_REG(115)
+ MOV_TO_REG(116)
+ MOV_TO_REG(117)
+ MOV_TO_REG(118)
+ MOV_TO_REG(119)
+ MOV_TO_REG(120)
+ MOV_TO_REG(121)
+ MOV_TO_REG(122)
+ MOV_TO_REG(123)
+ MOV_TO_REG(124)
+ MOV_TO_REG(125)
+ MOV_TO_REG(126)
+ MOV_TO_REG(127)
+END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
new file mode 100644
index 000000000000..5a33f7ed29a0
--- /dev/null
+++ b/arch/ia64/kvm/process.c
@@ -0,0 +1,970 @@
+/*
+ * process.c: handle interruption injection for guests.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Shaofan Li (Susue Li) <susie.li@intel.com>
+ * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ */
+#include "vcpu.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/fpswa.h>
+#include <asm/kregs.h>
+#include <asm/tlb.h>
+
+fpswa_interface_t *vmm_fpswa_interface;
+
+#define IA64_VHPT_TRANS_VECTOR 0x0000
+#define IA64_INST_TLB_VECTOR 0x0400
+#define IA64_DATA_TLB_VECTOR 0x0800
+#define IA64_ALT_INST_TLB_VECTOR 0x0c00
+#define IA64_ALT_DATA_TLB_VECTOR 0x1000
+#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
+#define IA64_INST_KEY_MISS_VECTOR 0x1800
+#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
+#define IA64_DIRTY_BIT_VECTOR 0x2000
+#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
+#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
+#define IA64_BREAK_VECTOR 0x2c00
+#define IA64_EXTINT_VECTOR 0x3000
+#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
+#define IA64_KEY_PERMISSION_VECTOR 0x5100
+#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
+#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
+#define IA64_GENEX_VECTOR 0x5400
+#define IA64_DISABLED_FPREG_VECTOR 0x5500
+#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
+#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
+#define IA64_DEBUG_VECTOR 0x5900
+#define IA64_UNALIGNED_REF_VECTOR 0x5a00
+#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
+#define IA64_FP_FAULT_VECTOR 0x5c00
+#define IA64_FP_TRAP_VECTOR 0x5d00
+#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
+#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
+#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
+
+/* SDM vol2 5.5 - IVA based interruption handling */
+#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
+ IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
+ IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
+
+#define DOMN_PAL_REQUEST 0x110000
+#define DOMN_SAL_REQUEST 0x110001
+
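+/*
+ * Map an interruption vector number (the index passed to
+ * reflect_interruption()) to its offset within the guest IVT.
+ */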
+static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
+ 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
+ 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
+ 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
+ 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
+ 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
+ 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
+ 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
+};
+
+static void collect_interruption(struct kvm_vcpu *vcpu)
+{
+ u64 ipsr;
+ u64 vdcr;
+ u64 vifs;
+ unsigned long vpsr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ vpsr = vcpu_get_psr(vcpu);
+ vcpu_bsw0(vcpu);
+ if (vpsr & IA64_PSR_IC) {
+
+	/* Sync the mpsr id/da/dd/ss/ed bits into vipsr, since we
+	 * still want these bits set in mpsr after the guest does
+	 * an rfi.
+	 */
+
+ ipsr = regs->cr_ipsr;
+ vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
+ | IA64_PSR_DD | IA64_PSR_SS
+ | IA64_PSR_ED));
+ vcpu_set_ipsr(vcpu, vpsr);
+
+	/* Currently, for a trap, we do not advance IIP to the next
+	 * instruction, because we assume the caller has already set
+	 * up IIP correctly.
+	 */
+
+	vcpu_set_iip(vcpu, regs->cr_iip);
+
+ /* set vifs.v to zero */
+ vifs = VCPU(vcpu, ifs);
+ vifs &= ~IA64_IFS_V;
+ vcpu_set_ifs(vcpu, vifs);
+
+ vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
+ }
+
+ vdcr = VCPU(vcpu, dcr);
+
+	/* Set guest psr
+	 * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
+	 * be: set to the value of dcr.be
+	 * pp: set to the value of dcr.pp
+	 */
+ vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
+ vpsr |= (vdcr & IA64_DCR_BE);
+
+ /* VDCR pp bit position is different from VPSR pp bit */
+	if (vdcr & IA64_DCR_PP)
+		vpsr |= IA64_PSR_PP;
+	else
+		vpsr &= ~IA64_PSR_PP;
+
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
+{
+ u64 viva;
+ struct kvm_pt_regs *regs;
+ union ia64_isr pt_isr;
+
+ regs = vcpu_regs(vcpu);
+
+ /* clear cr.isr.ir (incomplete register frame)*/
+ pt_isr.val = VMX(vcpu, cr_isr);
+ pt_isr.ir = 0;
+ VMX(vcpu, cr_isr) = pt_isr.val;
+
+ collect_interruption(vcpu);
+
+ viva = vcpu_get_iva(vcpu);
+ regs->cr_iip = viva + vec;
+}
+
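+/*
+ * Build the default ITIR for a fault: the page size and RID come from
+ * the region register covering the faulting address; all other fields
+ * are zero.
+ */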
+static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
+{
+ union ia64_rr rr, rr1;
+
+ rr.val = vcpu_get_rr(vcpu, ifa);
+ rr1.val = 0;
+ rr1.ps = rr.ps;
+ rr1.rid = rr.rid;
+ return (rr1.val);
+}
+
+
+/*
+ * Set vIFA & vITIR & vIHA, when vPSR.ic =1
+ * Parameter:
+ * set_ifa: if true, set vIFA
+ * set_itir: if true, set vITIR
+ * set_iha: if true, set vIHA
+ */
+void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
+ int set_ifa, int set_itir, int set_iha)
+{
+ long vpsr;
+ u64 value;
+
+ vpsr = VCPU(vcpu, vpsr);
+ /* Vol2, Table 8-1 */
+ if (vpsr & IA64_PSR_IC) {
+ if (set_ifa)
+ vcpu_set_ifa(vcpu, vadr);
+ if (set_itir) {
+ value = vcpu_get_itir_on_fault(vcpu, vadr);
+ vcpu_set_itir(vcpu, value);
+ }
+
+ if (set_iha) {
+ value = vcpu_thash(vcpu, vadr);
+ vcpu_set_iha(vcpu, value);
+ }
+ }
+}
+
+/*
+ * Data TLB Fault
+ * @ Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR, IHA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+ inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
+}
+
+/*
+ * Instruction TLB Fault
+ * @ Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR, IHA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+ inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
+}
+
+
+
+/*
+ * Data Nested TLB Fault
+ * @ Data Nested TLB Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void nested_dtlb(struct kvm_vcpu *vcpu)
+{
+ inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
+}
+
+/*
+ * Alternate Data TLB Fault
+ * @ Alternate Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
+}
+
+
+/*
+ * Alternate Instruction TLB Fault
+ *  @ Alternate Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
+}
+
+/* Deal with:
+ * VHPT Translation Vector
+ */
+static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+	/* If vPSR.ic, IFA, ITIR, IHA */
+	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
+}
+
+/*
+ * VHPT Instruction Fault
+ * @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _vhpt_fault(vcpu, vadr);
+}
+
+
+/*
+ * VHPT Data Fault
+ * @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _vhpt_fault(vcpu, vadr);
+}
+
+
+
+/*
+ * Deal with:
+ * General Exception vector
+ */
+void _general_exception(struct kvm_vcpu *vcpu)
+{
+ inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
+}
+
+
+/*
+ * Illegal Operation Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_op(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Illegal Dependency Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_dep(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Reserved Register/Field Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rsv_reg_field(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+/*
+ * Privileged Operation Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+
+void privilege_op(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Unimplemented Data Address Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void unimpl_daddr(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Privileged Register Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void privilege_reg(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/* Deal with
+ * Nat consumption vector
+ * Parameter:
+ * vaddr: Optional, if t == REGISTER
+ */
+static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
+ enum tlb_miss_type t)
+{
+ /* If vPSR.ic && t == DATA/INST, IFA */
+ if (t == DATA || t == INSTRUCTION) {
+ /* IFA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
+ }
+
+ inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
+}
+
+/*
+ * Instruction Nat Page Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
+}
+
+/*
+ * Register Nat Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rnat_consumption(struct kvm_vcpu *vcpu)
+{
+ _nat_consumption_fault(vcpu, 0, REGISTER);
+}
+
+/*
+ * Data Nat Page Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _nat_consumption_fault(vcpu, vadr, DATA);
+}
+
+/* Deal with
+ * Page not present vector
+ */
+static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
+}
+
+
+void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ __page_not_present(vcpu, vadr);
+}
+
+
+void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ __page_not_present(vcpu, vadr);
+}
+
+
+/* Deal with
+ * Data access rights vector
+ */
+void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
+}
+
+fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
+ unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
+ unsigned long *ifs, struct kvm_pt_regs *regs)
+{
+ fp_state_t fp_state;
+ fpswa_ret_t ret;
+ struct kvm_vcpu *vcpu = current_vcpu;
+
+ uint64_t old_rr7 = ia64_get_rr(7UL<<61);
+
+ if (!vmm_fpswa_interface)
+ return (fpswa_ret_t) {-1, 0, 0, 0};
+
+ /*
+	 * Just let the fpswa driver use the hardware fp registers.
+	 * No fp register contents are valid in memory.
+ */
+ memset(&fp_state, 0, sizeof(fp_state_t));
+
+ /*
+ * unsigned long (*EFI_FPSWA) (
+ * unsigned long trap_type,
+ * void *Bundle,
+ * unsigned long *pipsr,
+ * unsigned long *pfsr,
+ * unsigned long *pisr,
+ * unsigned long *ppreds,
+ * unsigned long *pifs,
+ * void *fp_state);
+ */
+	/*
+	 * Call the host fpswa interface directly to virtualize the
+	 * guest fpswa request.
+	 */
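+	/* Switch region 7 to the host's mapping while fpswa runs. */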
+ ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
+ ia64_srlz_d();
+
+ ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
+ ipsr, fpsr, isr, pr, ifs, &fp_state);
+ ia64_set_rr(7UL << 61, old_rr7);
+ ia64_srlz_d();
+ return ret;
+}
+
+/*
+ * Handle floating-point assist faults and traps for domain.
+ */
+unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
+ unsigned long isr)
+{
+ struct kvm_vcpu *v = current_vcpu;
+ IA64_BUNDLE bundle;
+ unsigned long fault_ip;
+ fpswa_ret_t ret;
+
+ fault_ip = regs->cr_iip;
+ /*
+	 * When an FP trap occurs, the trapping instruction has completed.
+	 * If ipsr.ri == 0, the trapping instruction is in the previous
+	 * bundle.
+ */
+ if (!fp_fault && (ia64_psr(regs)->ri == 0))
+ fault_ip -= 16;
+
+ if (fetch_code(v, fault_ip, &bundle))
+ return -EAGAIN;
+
+ if (!bundle.i64[0] && !bundle.i64[1])
+ return -EACCES;
+
+ ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
+ &isr, &regs->pr, &regs->cr_ifs, regs);
+ return ret.status;
+}
+
+void reflect_interruption(u64 ifa, u64 isr, u64 iim,
+ u64 vec, struct kvm_pt_regs *regs)
+{
+ u64 vector;
+	int status;
+ struct kvm_vcpu *vcpu = current_vcpu;
+ u64 vpsr = VCPU(vcpu, vpsr);
+
+ vector = vec2off[vec];
+
+ if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
+ panic_vm(vcpu);
+ return;
+ }
+
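+	/*
+	 * FP faults/traps are first offered to the fpswa emulator and
+	 * only reflected to the guest if emulation does not handle them.
+	 */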
+ switch (vec) {
+ case 32: /*IA64_FP_FAULT_VECTOR*/
+ status = vmm_handle_fpu_swa(1, regs, isr);
+ if (!status) {
+ vcpu_increment_iip(vcpu);
+ return;
+ } else if (-EAGAIN == status)
+ return;
+ break;
+ case 33: /*IA64_FP_TRAP_VECTOR*/
+ status = vmm_handle_fpu_swa(0, regs, isr);
+ if (!status)
+			return;
+		else if (-EAGAIN == status) {
+			vcpu_decrement_iip(vcpu);
+			return;
+ }
+ break;
+ }
+
+ VCPU(vcpu, isr) = isr;
+ VCPU(vcpu, iipa) = regs->cr_iip;
+ if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
+ VCPU(vcpu, iim) = iim;
+ else
+ set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
+
+ inject_guest_interruption(vcpu, vector);
+}
+
+static void set_pal_call_data(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+	/*
+	 * FIXME: For both the static and stacked conventions, the
+	 * firmware has put the parameters in gr28-gr31 before
+	 * breaking into the vmm.
+	 */
+
+ p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
+ p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
+ p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+ p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+ p->exit_reason = EXIT_REASON_PAL_CALL;
+}
+
+static void set_pal_call_result(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+ vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
+ vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
+ vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
+ vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
+ } else
+ panic_vm(vcpu);
+}
+
+static void set_sal_call_data(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
+ p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
+ p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
+ p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
+ p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
+ p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
+ p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
+ p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
+ p->exit_reason = EXIT_REASON_SAL_CALL;
+}
+
+static void set_sal_call_result(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
+ vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
+ vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
+ vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
+ } else
+ panic_vm(vcpu);
+}
+
+void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
+ unsigned long isr, unsigned long iim)
+{
+ struct kvm_vcpu *v = current_vcpu;
+
+ if (ia64_psr(regs)->cpl == 0) {
+ /* Allow hypercalls only when cpl = 0. */
+ if (iim == DOMN_PAL_REQUEST) {
+ set_pal_call_data(v);
+ vmm_transition(v);
+ set_pal_call_result(v);
+ vcpu_increment_iip(v);
+ return;
+ } else if (iim == DOMN_SAL_REQUEST) {
+ set_sal_call_data(v);
+ vmm_transition(v);
+ set_sal_call_result(v);
+ vcpu_increment_iip(v);
+ return;
+ }
+ }
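+	/* Not a hypercall: reflect the break fault (vector 11) to the guest. */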
+ reflect_interruption(ifa, isr, iim, 11, regs);
+}
+
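+/*
+ * Inject the highest pending interrupt as an external interrupt
+ * (vector 12) if it is unmasked; otherwise record it in vhpi so it
+ * can be delivered later.
+ */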
+void check_pending_irq(struct kvm_vcpu *vcpu)
+{
+ int mask, h_pending, h_inservice;
+ u64 isr;
+ unsigned long vpsr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ h_pending = highest_pending_irq(vcpu);
+ if (h_pending == NULL_VECTOR) {
+ update_vhpi(vcpu, NULL_VECTOR);
+ return;
+ }
+ h_inservice = highest_inservice_irq(vcpu);
+
+ vpsr = VCPU(vcpu, vpsr);
+ mask = irq_masked(vcpu, h_pending, h_inservice);
+ if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
+ isr = vpsr & IA64_PSR_RI;
+ update_vhpi(vcpu, h_pending);
+ reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+ } else if (mask == IRQ_MASKED_BY_INSVC) {
+ if (VCPU(vcpu, vhpi))
+ update_vhpi(vcpu, NULL_VECTOR);
+ } else {
+		/* Masked by vpsr.i or vtpr. */
+ update_vhpi(vcpu, h_pending);
+ }
+}
+
+static void generate_exirq(struct kvm_vcpu *vcpu)
+{
+ unsigned vpsr;
+ uint64_t isr;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ vpsr = VCPU(vcpu, vpsr);
+ isr = vpsr & IA64_PSR_RI;
+ if (!(vpsr & IA64_PSR_IC))
+ panic_vm(vcpu);
+ reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+}
+
+void vhpi_detection(struct kvm_vcpu *vcpu)
+{
+ uint64_t threshold, vhpi;
+ union ia64_tpr vtpr;
+ struct ia64_psr vpsr;
+
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ vtpr.val = VCPU(vcpu, tpr);
+
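+	/*
+	 * The threshold layers the mask sources by weight: a clear psr.i
+	 * (bit 5) outranks tpr.mmi (bit 4), which outranks the 4-bit
+	 * tpr.mic priority class.
+	 */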
+ threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
+ vhpi = VCPU(vcpu, vhpi);
+ if (vhpi > threshold) {
+		/* interrupt activated */
+ generate_exirq(vcpu);
+ }
+}
+
+
+void leave_hypervisor_tail(void)
+{
+ struct kvm_vcpu *v = current_vcpu;
+
+ if (VMX(v, timer_check)) {
+ VMX(v, timer_check) = 0;
+ if (VMX(v, itc_check)) {
+ if (vcpu_get_itc(v) > VCPU(v, itm)) {
+ if (!(VCPU(v, itv) & (1 << 16))) {
+ vcpu_pend_interrupt(v, VCPU(v, itv)
+ & 0xff);
+ VMX(v, itc_check) = 0;
+ } else {
+ v->arch.timer_pending = 1;
+ }
+ VMX(v, last_itc) = VCPU(v, itm) + 1;
+ }
+ }
+ }
+
+ rmb();
+ if (v->arch.irq_new_pending) {
+ v->arch.irq_new_pending = 0;
+ VMX(v, irq_check) = 0;
+ check_pending_irq(v);
+ return;
+ }
+ if (VMX(v, irq_check)) {
+ VMX(v, irq_check) = 0;
+ vhpi_detection(v);
+ }
+}
+
+
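+/*
+ * For a faulting speculative load, set psr.ed so the instruction
+ * completes with a deferred exception (NaT) instead of faulting again.
+ */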
+static inline void handle_lds(struct kvm_pt_regs *regs)
+{
+ regs->cr_ipsr |= IA64_PSR_ED;
+}
+
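+/*
+ * TLB miss while the guest runs in physical mode: install a mapping
+ * whose ppn is the guest-physical page itself, with the write-back
+ * attribute supplied by PHY_PAGE_WB.
+ */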
+void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
+{
+ unsigned long pte;
+ union ia64_rr rr;
+
+ rr.val = ia64_get_rr(vadr);
+ pte = vadr & _PAGE_PPN_MASK;
+ pte = pte | PHY_PAGE_WB;
+ thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
+ return;
+}
+
+void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
+{
+ unsigned long vpsr;
+ int type;
+
+ u64 vhpt_adr, gppa, pteval, rr, itir;
+ union ia64_isr misr;
+ union ia64_pta vpta;
+ struct thash_data *data;
+ struct kvm_vcpu *v = current_vcpu;
+
+ vpsr = VCPU(v, vpsr);
+ misr.val = VMX(v, cr_isr);
+
+ type = vec;
+
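+	/*
+	 * Guest in physical mode touching region 0/4: I/O pages become
+	 * mmio emulation, anything else gets a metaphysical mapping.
+	 */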
+ if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
+ if (vec == 2) {
+ if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
+ emulate_io_inst(v, ((vadr << 1) >> 1), 4);
+ return;
+ }
+ }
+ physical_tlb_miss(v, vadr, type);
+ return;
+ }
+ data = vtlb_lookup(v, vadr, type);
+ if (data != 0) {
+ if (type == D_TLB) {
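+			/* data->ppn is in 4K units; reassemble the guest-physical address. */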
+ gppa = (vadr & ((1UL << data->ps) - 1))
+ + (data->ppn >> (data->ps - 12) << data->ps);
+ if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
+ if (data->pl >= ((regs->cr_ipsr >>
+ IA64_PSR_CPL0_BIT) & 3))
+ emulate_io_inst(v, gppa, data->ma);
+ else {
+ vcpu_set_isr(v, misr.val);
+ data_access_rights(v, vadr);
+ }
+				return;
+ }
+ }
+ thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
+
+ } else if (type == D_TLB) {
+ if (misr.sp) {
+ handle_lds(regs);
+ return;
+ }
+
+ rr = vcpu_get_rr(v, vadr);
+ itir = rr & (RR_RID_MASK | RR_PS_MASK);
+
+ if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ alt_dtlb(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+			return;
+ }
+
+ vpta.val = vcpu_get_pta(v);
+ /* avoid recursively walking (short format) VHPT */
+
+ vhpt_adr = vcpu_thash(v, vadr);
+ if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+ /* VHPT successfully read. */
+ if (!(pteval & _PAGE_P)) {
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
+ thash_purge_and_insert(v, pteval, itir,
+ vadr, D_TLB);
+ } else if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ } else {
+ /* Can't read VHPT. */
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dvhpt_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ }
+ } else if (type == I_TLB) {
+ if (!(vpsr & IA64_PSR_IC))
+ misr.ni = 1;
+ if (!vhpt_enabled(v, vadr, INST_REF)) {
+ vcpu_set_isr(v, misr.val);
+ alt_itlb(v, vadr);
+ return;
+ }
+
+ vpta.val = vcpu_get_pta(v);
+
+ vhpt_adr = vcpu_thash(v, vadr);
+ if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+ /* VHPT successfully read. */
+ if (pteval & _PAGE_P) {
+ if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
+ vcpu_set_isr(v, misr.val);
+ itlb_fault(v, vadr);
+					return;
+ }
+ rr = vcpu_get_rr(v, vadr);
+ itir = rr & (RR_RID_MASK | RR_PS_MASK);
+ thash_purge_and_insert(v, pteval, itir,
+ vadr, I_TLB);
+ } else {
+ vcpu_set_isr(v, misr.val);
+ inst_page_not_present(v, vadr);
+ }
+ } else {
+ vcpu_set_isr(v, misr.val);
+ ivhpt_fault(v, vadr);
+ }
+ }
+}
+
+void kvm_vexirq(struct kvm_vcpu *vcpu)
+{
+ u64 vpsr, isr;
+ struct kvm_pt_regs *regs;
+
+ regs = vcpu_regs(vcpu);
+ vpsr = VCPU(vcpu, vpsr);
+ isr = vpsr & IA64_PSR_RI;
+ reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
+}
+
+void kvm_ia64_handle_irq(struct kvm_vcpu *v)
+{
+ struct exit_ctl_data *p = &v->arch.exit_data;
+ long psr;
+
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
+ vmm_transition(v);
+ local_irq_restore(psr);
+
+ VMX(v, timer_check) = 1;
+}
+
+static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
+{
+ u64 oldrid, moldrid, oldpsbits, vaddr;
+ struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
+ vaddr = p->vaddr;
+
+ oldrid = VMX(v, vrr[0]);
+ VMX(v, vrr[0]) = p->rr;
+ oldpsbits = VMX(v, psbits[0]);
+ VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
+ moldrid = ia64_get_rr(0x0);
+ ia64_set_rr(0x0, vrrtomrr(p->rr));
+ ia64_srlz_d();
+
+ vaddr = PAGEALIGN(vaddr, p->ps);
+ thash_purge_entries_remote(v, vaddr, p->ps);
+
+ VMX(v, vrr[0]) = oldrid;
+ VMX(v, psbits[0]) = oldpsbits;
+ ia64_set_rr(0x0, moldrid);
+ ia64_dv_serialize_data();
+}
+
+static void vcpu_do_resume(struct kvm_vcpu *vcpu)
+{
+	/* Re-initialize the VHPT and VTLB when resuming */
+ vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
+ thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
+ vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
+ thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
+
+ ia64_set_pta(vcpu->arch.vhpt.pta.val);
+}
+
+static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
+{
+ if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
+ vcpu_do_resume(vcpu);
+ return;
+ }
+
+ if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
+ thash_purge_all(vcpu);
+ return;
+ }
+
+ if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
+ while (vcpu->arch.ptc_g_count > 0)
+ ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
+ }
+}
+
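+/*
+ * Leave the vmm: save the guest VPD state via PAL, switch the full
+ * processor context over with vmm_trampoline(), restore the VPD on
+ * the way back in, and service any requests posted in the meantime.
+ */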
+void vmm_transition(struct kvm_vcpu *vcpu)
+{
+ ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
+ ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ kvm_do_resume_op(vcpu);
+}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
new file mode 100644
index 000000000000..30897d44d61e
--- /dev/null
+++ b/arch/ia64/kvm/trampoline.S
@@ -0,0 +1,1038 @@
+/* Save all processor states
+ *
+ * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
+ * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include "asm-offsets.h"
+
+
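+/* Byte offset of the named field within context_t (generated asm offsets). */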
+#define CTX(name) VMM_CTX_##name##_OFFSET
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_BRANCH_REGS \
+ add r2 = CTX(B0),r32; \
+ add r3 = CTX(B1),r32; \
+ mov r16 = b0; \
+ mov r17 = b1; \
+ ;; \
+ st8 [r2]=r16,16; \
+ st8 [r3]=r17,16; \
+ ;; \
+ mov r16 = b2; \
+ mov r17 = b3; \
+ ;; \
+ st8 [r2]=r16,16; \
+ st8 [r3]=r17,16; \
+ ;; \
+ mov r16 = b4; \
+ mov r17 = b5; \
+ ;; \
+ st8 [r2]=r16; \
+ st8 [r3]=r17; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_BRANCH_REGS \
+ add r2 = CTX(B0),r33; \
+ add r3 = CTX(B1),r33; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov b0 = r16; \
+ mov b1 = r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov b2 = r16; \
+ mov b3 = r17; \
+ ;; \
+ ld8 r16=[r2]; \
+ ld8 r17=[r3]; \
+ ;; \
+ mov b4=r16; \
+ mov b5=r17; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
+ * bsw == 1
+ * Save all bank1 general registers, r4 ~ r7
+ */
+#define SAVE_GENERAL_REGS \
+ add r2=CTX(R4),r32; \
+ add r3=CTX(R5),r32; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r4,16; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r6,48; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r7,48; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r12; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r13; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ * bsw == 1
+ */
+#define RESTORE_GENERAL_REGS \
+ add r2=CTX(R4),r33; \
+ add r3=CTX(R5),r33; \
+ ;; \
+ ld8.fill r4=[r2],16; \
+ ld8.fill r5=[r3],16; \
+ ;; \
+ ld8.fill r6=[r2],48; \
+ ld8.fill r7=[r3],48; \
+ ;; \
+ ld8.fill r12=[r2]; \
+ ld8.fill r13 =[r3]; \
+ ;;
+
+
+
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_KERNEL_REGS \
+ add r2 = CTX(KR0),r32; \
+ add r3 = CTX(KR1),r32; \
+ mov r16 = ar.k0; \
+ mov r17 = ar.k1; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k2; \
+ mov r17 = ar.k3; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k4; \
+ mov r17 = ar.k5; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k6; \
+ mov r17 = ar.k7; \
+ ;; \
+ st8 [r2] = r16; \
+ st8 [r3] = r17; \
+ ;;
+
+
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_KERNEL_REGS \
+ add r2 = CTX(KR0),r33; \
+ add r3 = CTX(KR1),r33; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k0=r16; \
+ mov ar.k1=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k2=r16; \
+ mov ar.k3=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k4=r16; \
+ mov ar.k5=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k6=r16; \
+ mov ar.k7=r17; \
+ ;;
+
+
+
+ /*
+ * r32: context_t base address
+ */
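+	/* The CTX(next)-CTX(cur) post-increments step r2 straight to the next field. */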
+#define SAVE_APP_REGS \
+ add r2 = CTX(BSPSTORE),r32; \
+ mov r16 = ar.bspstore; \
+ ;; \
+ st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
+ mov r16 = ar.rnat; \
+ ;; \
+ st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \
+ mov r16 = ar.fcr; \
+ ;; \
+ st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \
+ mov r16 = ar.eflag; \
+ ;; \
+ st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \
+ mov r16 = ar.cflg; \
+ ;; \
+ st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \
+ mov r16 = ar.fsr; \
+ ;; \
+ st8 [r2] = r16,CTX(FIR)-CTX(FSR); \
+ mov r16 = ar.fir; \
+ ;; \
+ st8 [r2] = r16,CTX(FDR)-CTX(FIR); \
+ mov r16 = ar.fdr; \
+ ;; \
+ st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \
+ mov r16 = ar.unat; \
+ ;; \
+ st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \
+ mov r16 = ar.fpsr; \
+ ;; \
+ st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \
+ mov r16 = ar.pfs; \
+ ;; \
+ st8 [r2] = r16,CTX(LC)-CTX(PFS); \
+ mov r16 = ar.lc; \
+ ;; \
+ st8 [r2] = r16; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_APP_REGS \
+ add r2=CTX(BSPSTORE),r33; \
+ ;; \
+ ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \
+ ;; \
+ mov ar.bspstore=r16; \
+ ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \
+ ;; \
+ mov ar.rnat=r16; \
+ ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \
+ ;; \
+ mov ar.fcr=r16; \
+ ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \
+ ;; \
+ mov ar.eflag=r16; \
+ ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \
+ ;; \
+ mov ar.cflg=r16; \
+ ld8 r16=[r2],CTX(FIR)-CTX(FSR); \
+ ;; \
+ mov ar.fsr=r16; \
+ ld8 r16=[r2],CTX(FDR)-CTX(FIR); \
+ ;; \
+ mov ar.fir=r16; \
+ ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \
+ ;; \
+ mov ar.fdr=r16; \
+ ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \
+ ;; \
+ mov ar.unat=r16; \
+ ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \
+ ;; \
+ mov ar.fpsr=r16; \
+ ld8 r16=[r2],CTX(LC)-CTX(PFS); \
+ ;; \
+ mov ar.pfs=r16; \
+ ld8 r16=[r2]; \
+ ;; \
+ mov ar.lc=r16; \
+ ;;
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_CTL_REGS \
+ add r2 = CTX(DCR),r32; \
+ mov r16 = cr.dcr; \
+ ;; \
+ st8 [r2] = r16,CTX(IVA)-CTX(DCR); \
+ ;; \
+ mov r16 = cr.iva; \
+ ;; \
+ st8 [r2] = r16,CTX(PTA)-CTX(IVA); \
+ ;; \
+ mov r16 = cr.pta; \
+ ;; \
+ st8 [r2] = r16 ; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_CTL_REGS \
+ add r2 = CTX(DCR),r33; \
+ ;; \
+ ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \
+ ;; \
+ mov cr.dcr = r16; \
+ dv_serialize_data; \
+ ;; \
+ ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \
+ ;; \
+ mov cr.iva = r16; \
+ dv_serialize_data; \
+ ;; \
+ ld8 r16 = [r2]; \
+ ;; \
+ mov cr.pta = r16; \
+ dv_serialize_data; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
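+	 * Note: rr6 is not saved/restored; the 16-byte post-increments skip its slot.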
+ */
+#define SAVE_REGION_REGS \
+ add r2=CTX(RR0),r32; \
+ mov r16=rr[r0]; \
+ dep.z r18=1,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=2,61,3; \
+ ;; \
+ st8 [r2]=r17,8; \
+ mov r16=rr[r18]; \
+ dep.z r18=3,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=4,61,3; \
+ ;; \
+ st8 [r2]=r17,8; \
+ mov r16=rr[r18]; \
+ dep.z r18=5,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=7,61,3; \
+ ;; \
+ st8 [r2]=r17,16; \
+ mov r16=rr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ ;;
+
+ /*
+ * r33:context_t base address
+ */
+#define RESTORE_REGION_REGS \
+ add r2=CTX(RR0),r33;\
+ mov r18=r0; \
+ ;; \
+ ld8 r20=[r2],8; \
+ ;; /* rr0 */ \
+ ld8 r21=[r2],8; \
+ ;; /* rr1 */ \
+ ld8 r22=[r2],8; \
+ ;; /* rr2 */ \
+ ld8 r23=[r2],8; \
+ ;; /* rr3 */ \
+ ld8 r24=[r2],8; \
+ ;; /* rr4 */ \
+ ld8 r25=[r2],16; \
+ ;; /* rr5 */ \
+ ld8 r27=[r2]; \
+ ;; /* rr7 */ \
+ mov rr[r18]=r20; \
+ dep.z r18=1,61,3; \
+ ;; /* rr1 */ \
+ mov rr[r18]=r21; \
+ dep.z r18=2,61,3; \
+ ;; /* rr2 */ \
+ mov rr[r18]=r22; \
+ dep.z r18=3,61,3; \
+ ;; /* rr3 */ \
+ mov rr[r18]=r23; \
+ dep.z r18=4,61,3; \
+ ;; /* rr4 */ \
+ mov rr[r18]=r24; \
+ dep.z r18=5,61,3; \
+ ;; /* rr5 */ \
+ mov rr[r18]=r25; \
+ dep.z r18=7,61,3; \
+ ;; /* rr7 */ \
+ mov rr[r18]=r27; \
+ ;; \
+ srlz.i; \
+ ;;
+
+
+
+ /*
+ * r32: context_t base address
+	 * r2, r3, r16~r18: scratch registers
+ */
+#define SAVE_DEBUG_REGS \
+ add r2=CTX(IBR0),r32; \
+ add r3=CTX(DBR0),r32; \
+ mov r16=ibr[r0]; \
+ mov r17=dbr[r0]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=1,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=2,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=3,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=4,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=5,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=6,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=7,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ ;;
+
+
+/*
+ * r33: points to the context_t structure
+ * ar.lc is clobbered.
+ */
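+/* With ar.lc = 7, br.cloop executes the body 8 times (ibr0-7/dbr0-7). */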
+#define RESTORE_DEBUG_REGS \
+ add r2=CTX(IBR0),r33; \
+ add r3=CTX(DBR0),r33; \
+ mov r16=7; \
+ mov r17=r0; \
+ ;; \
+ mov ar.lc = r16; \
+ ;; \
+1: \
+ ld8 r18=[r2],8; \
+ ld8 r19=[r3],8; \
+ ;; \
+ mov ibr[r17]=r18; \
+ mov dbr[r17]=r19; \
+ ;; \
+ srlz.i; \
+ ;; \
+ add r17=1,r17; \
+ br.cloop.sptk 1b; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_FPU_LOW \
+ add r2=CTX(F2),r32; \
+ add r3=CTX(F3),r32; \
+ ;; \
+ stf.spill.nta [r2]=f2,32; \
+ stf.spill.nta [r3]=f3,32; \
+ ;; \
+ stf.spill.nta [r2]=f4,32; \
+ stf.spill.nta [r3]=f5,32; \
+ ;; \
+ stf.spill.nta [r2]=f6,32; \
+ stf.spill.nta [r3]=f7,32; \
+ ;; \
+ stf.spill.nta [r2]=f8,32; \
+ stf.spill.nta [r3]=f9,32; \
+ ;; \
+ stf.spill.nta [r2]=f10,32; \
+ stf.spill.nta [r3]=f11,32; \
+ ;; \
+ stf.spill.nta [r2]=f12,32; \
+ stf.spill.nta [r3]=f13,32; \
+ ;; \
+ stf.spill.nta [r2]=f14,32; \
+ stf.spill.nta [r3]=f15,32; \
+ ;; \
+ stf.spill.nta [r2]=f16,32; \
+ stf.spill.nta [r3]=f17,32; \
+ ;; \
+ stf.spill.nta [r2]=f18,32; \
+ stf.spill.nta [r3]=f19,32; \
+ ;; \
+ stf.spill.nta [r2]=f20,32; \
+ stf.spill.nta [r3]=f21,32; \
+ ;; \
+ stf.spill.nta [r2]=f22,32; \
+ stf.spill.nta [r3]=f23,32; \
+ ;; \
+ stf.spill.nta [r2]=f24,32; \
+ stf.spill.nta [r3]=f25,32; \
+ ;; \
+ stf.spill.nta [r2]=f26,32; \
+ stf.spill.nta [r3]=f27,32; \
+ ;; \
+ stf.spill.nta [r2]=f28,32; \
+ stf.spill.nta [r3]=f29,32; \
+ ;; \
+ stf.spill.nta [r2]=f30; \
+ stf.spill.nta [r3]=f31; \
+ ;;
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_FPU_HIGH \
+ add r2=CTX(F32),r32; \
+ add r3=CTX(F33),r32; \
+ ;; \
+ stf.spill.nta [r2]=f32,32; \
+ stf.spill.nta [r3]=f33,32; \
+ ;; \
+ stf.spill.nta [r2]=f34,32; \
+ stf.spill.nta [r3]=f35,32; \
+ ;; \
+ stf.spill.nta [r2]=f36,32; \
+ stf.spill.nta [r3]=f37,32; \
+ ;; \
+ stf.spill.nta [r2]=f38,32; \
+ stf.spill.nta [r3]=f39,32; \
+ ;; \
+ stf.spill.nta [r2]=f40,32; \
+ stf.spill.nta [r3]=f41,32; \
+ ;; \
+ stf.spill.nta [r2]=f42,32; \
+ stf.spill.nta [r3]=f43,32; \
+ ;; \
+ stf.spill.nta [r2]=f44,32; \
+ stf.spill.nta [r3]=f45,32; \
+ ;; \
+ stf.spill.nta [r2]=f46,32; \
+ stf.spill.nta [r3]=f47,32; \
+ ;; \
+ stf.spill.nta [r2]=f48,32; \
+ stf.spill.nta [r3]=f49,32; \
+ ;; \
+ stf.spill.nta [r2]=f50,32; \
+ stf.spill.nta [r3]=f51,32; \
+ ;; \
+ stf.spill.nta [r2]=f52,32; \
+ stf.spill.nta [r3]=f53,32; \
+ ;; \
+ stf.spill.nta [r2]=f54,32; \
+ stf.spill.nta [r3]=f55,32; \
+ ;; \
+ stf.spill.nta [r2]=f56,32; \
+ stf.spill.nta [r3]=f57,32; \
+ ;; \
+ stf.spill.nta [r2]=f58,32; \
+ stf.spill.nta [r3]=f59,32; \
+ ;; \
+ stf.spill.nta [r2]=f60,32; \
+ stf.spill.nta [r3]=f61,32; \
+ ;; \
+ stf.spill.nta [r2]=f62,32; \
+ stf.spill.nta [r3]=f63,32; \
+ ;; \
+ stf.spill.nta [r2]=f64,32; \
+ stf.spill.nta [r3]=f65,32; \
+ ;; \
+ stf.spill.nta [r2]=f66,32; \
+ stf.spill.nta [r3]=f67,32; \
+ ;; \
+ stf.spill.nta [r2]=f68,32; \
+ stf.spill.nta [r3]=f69,32; \
+ ;; \
+ stf.spill.nta [r2]=f70,32; \
+ stf.spill.nta [r3]=f71,32; \
+ ;; \
+ stf.spill.nta [r2]=f72,32; \
+ stf.spill.nta [r3]=f73,32; \
+ ;; \
+ stf.spill.nta [r2]=f74,32; \
+ stf.spill.nta [r3]=f75,32; \
+ ;; \
+ stf.spill.nta [r2]=f76,32; \
+ stf.spill.nta [r3]=f77,32; \
+ ;; \
+ stf.spill.nta [r2]=f78,32; \
+ stf.spill.nta [r3]=f79,32; \
+ ;; \
+ stf.spill.nta [r2]=f80,32; \
+ stf.spill.nta [r3]=f81,32; \
+ ;; \
+ stf.spill.nta [r2]=f82,32; \
+ stf.spill.nta [r3]=f83,32; \
+ ;; \
+ stf.spill.nta [r2]=f84,32; \
+ stf.spill.nta [r3]=f85,32; \
+ ;; \
+ stf.spill.nta [r2]=f86,32; \
+ stf.spill.nta [r3]=f87,32; \
+ ;; \
+ stf.spill.nta [r2]=f88,32; \
+ stf.spill.nta [r3]=f89,32; \
+ ;; \
+ stf.spill.nta [r2]=f90,32; \
+ stf.spill.nta [r3]=f91,32; \
+ ;; \
+ stf.spill.nta [r2]=f92,32; \
+ stf.spill.nta [r3]=f93,32; \
+ ;; \
+ stf.spill.nta [r2]=f94,32; \
+ stf.spill.nta [r3]=f95,32; \
+ ;; \
+ stf.spill.nta [r2]=f96,32; \
+ stf.spill.nta [r3]=f97,32; \
+ ;; \
+ stf.spill.nta [r2]=f98,32; \
+ stf.spill.nta [r3]=f99,32; \
+ ;; \
+ stf.spill.nta [r2]=f100,32; \
+ stf.spill.nta [r3]=f101,32; \
+ ;; \
+ stf.spill.nta [r2]=f102,32; \
+ stf.spill.nta [r3]=f103,32; \
+ ;; \
+ stf.spill.nta [r2]=f104,32; \
+ stf.spill.nta [r3]=f105,32; \
+ ;; \
+ stf.spill.nta [r2]=f106,32; \
+ stf.spill.nta [r3]=f107,32; \
+ ;; \
+ stf.spill.nta [r2]=f108,32; \
+ stf.spill.nta [r3]=f109,32; \
+ ;; \
+ stf.spill.nta [r2]=f110,32; \
+ stf.spill.nta [r3]=f111,32; \
+ ;; \
+ stf.spill.nta [r2]=f112,32; \
+ stf.spill.nta [r3]=f113,32; \
+ ;; \
+ stf.spill.nta [r2]=f114,32; \
+ stf.spill.nta [r3]=f115,32; \
+ ;; \
+ stf.spill.nta [r2]=f116,32; \
+ stf.spill.nta [r3]=f117,32; \
+ ;; \
+ stf.spill.nta [r2]=f118,32; \
+ stf.spill.nta [r3]=f119,32; \
+ ;; \
+ stf.spill.nta [r2]=f120,32; \
+ stf.spill.nta [r3]=f121,32; \
+ ;; \
+ stf.spill.nta [r2]=f122,32; \
+ stf.spill.nta [r3]=f123,32; \
+ ;; \
+ stf.spill.nta [r2]=f124,32; \
+ stf.spill.nta [r3]=f125,32; \
+ ;; \
+ stf.spill.nta [r2]=f126; \
+ stf.spill.nta [r3]=f127; \
+ ;;
+
+ /*
+	 * r33: points to the context_t structure
+ */
+#define RESTORE_FPU_LOW \
+ add r2 = CTX(F2), r33; \
+ add r3 = CTX(F3), r33; \
+ ;; \
+ ldf.fill.nta f2 = [r2], 32; \
+ ldf.fill.nta f3 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f4 = [r2], 32; \
+ ldf.fill.nta f5 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f6 = [r2], 32; \
+ ldf.fill.nta f7 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f8 = [r2], 32; \
+ ldf.fill.nta f9 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f10 = [r2], 32; \
+ ldf.fill.nta f11 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f12 = [r2], 32; \
+ ldf.fill.nta f13 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f14 = [r2], 32; \
+ ldf.fill.nta f15 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f16 = [r2], 32; \
+ ldf.fill.nta f17 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f18 = [r2], 32; \
+ ldf.fill.nta f19 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f20 = [r2], 32; \
+ ldf.fill.nta f21 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f22 = [r2], 32; \
+ ldf.fill.nta f23 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f24 = [r2], 32; \
+ ldf.fill.nta f25 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f26 = [r2], 32; \
+ ldf.fill.nta f27 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f28 = [r2], 32; \
+ ldf.fill.nta f29 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f30 = [r2], 32; \
+ ldf.fill.nta f31 = [r3], 32; \
+ ;;
+
+
+
+ /*
+	 * r33: points to the context_t structure
+ */
+#define RESTORE_FPU_HIGH \
+ add r2 = CTX(F32), r33; \
+ add r3 = CTX(F33), r33; \
+ ;; \
+ ldf.fill.nta f32 = [r2], 32; \
+ ldf.fill.nta f33 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f34 = [r2], 32; \
+ ldf.fill.nta f35 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f36 = [r2], 32; \
+ ldf.fill.nta f37 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f38 = [r2], 32; \
+ ldf.fill.nta f39 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f40 = [r2], 32; \
+ ldf.fill.nta f41 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f42 = [r2], 32; \
+ ldf.fill.nta f43 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f44 = [r2], 32; \
+ ldf.fill.nta f45 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f46 = [r2], 32; \
+ ldf.fill.nta f47 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f48 = [r2], 32; \
+ ldf.fill.nta f49 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f50 = [r2], 32; \
+ ldf.fill.nta f51 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f52 = [r2], 32; \
+ ldf.fill.nta f53 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f54 = [r2], 32; \
+ ldf.fill.nta f55 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f56 = [r2], 32; \
+ ldf.fill.nta f57 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f58 = [r2], 32; \
+ ldf.fill.nta f59 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f60 = [r2], 32; \
+ ldf.fill.nta f61 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f62 = [r2], 32; \
+ ldf.fill.nta f63 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f64 = [r2], 32; \
+ ldf.fill.nta f65 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f66 = [r2], 32; \
+ ldf.fill.nta f67 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f68 = [r2], 32; \
+ ldf.fill.nta f69 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f70 = [r2], 32; \
+ ldf.fill.nta f71 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f72 = [r2], 32; \
+ ldf.fill.nta f73 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f74 = [r2], 32; \
+ ldf.fill.nta f75 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f76 = [r2], 32; \
+ ldf.fill.nta f77 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f78 = [r2], 32; \
+ ldf.fill.nta f79 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f80 = [r2], 32; \
+ ldf.fill.nta f81 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f82 = [r2], 32; \
+ ldf.fill.nta f83 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f84 = [r2], 32; \
+ ldf.fill.nta f85 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f86 = [r2], 32; \
+ ldf.fill.nta f87 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f88 = [r2], 32; \
+ ldf.fill.nta f89 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f90 = [r2], 32; \
+ ldf.fill.nta f91 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f92 = [r2], 32; \
+ ldf.fill.nta f93 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f94 = [r2], 32; \
+ ldf.fill.nta f95 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f96 = [r2], 32; \
+ ldf.fill.nta f97 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f98 = [r2], 32; \
+ ldf.fill.nta f99 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f100 = [r2], 32; \
+ ldf.fill.nta f101 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f102 = [r2], 32; \
+ ldf.fill.nta f103 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f104 = [r2], 32; \
+ ldf.fill.nta f105 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f106 = [r2], 32; \
+ ldf.fill.nta f107 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f108 = [r2], 32; \
+ ldf.fill.nta f109 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f110 = [r2], 32; \
+ ldf.fill.nta f111 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f112 = [r2], 32; \
+ ldf.fill.nta f113 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f114 = [r2], 32; \
+ ldf.fill.nta f115 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f116 = [r2], 32; \
+ ldf.fill.nta f117 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f118 = [r2], 32; \
+ ldf.fill.nta f119 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f120 = [r2], 32; \
+ ldf.fill.nta f121 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f122 = [r2], 32; \
+ ldf.fill.nta f123 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f124 = [r2], 32; \
+ ldf.fill.nta f125 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f126 = [r2], 32; \
+ ldf.fill.nta f127 = [r3], 32; \
+ ;;
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_PTK_REGS \
+ add r2=CTX(PKR0), r32; \
+ mov r16=7; \
+ ;; \
+ mov ar.lc=r16; \
+ mov r17=r0; \
+ ;; \
+1: \
+ mov r18=pkr[r17]; \
+ ;; \
+ srlz.i; \
+ ;; \
+ st8 [r2]=r18, 8; \
+ ;; \
+ add r17 =1,r17; \
+ ;; \
+ br.cloop.sptk 1b; \
+ ;;
+
+/*
+ * r33: points to the context_t structure
+ * ar.lc is clobbered.
+ */
+#define RESTORE_PTK_REGS \
+ add r2=CTX(PKR0), r33; \
+ mov r16=7; \
+ ;; \
+ mov ar.lc=r16; \
+ mov r17=r0; \
+ ;; \
+1: \
+ ld8 r18=[r2], 8; \
+ ;; \
+ mov pkr[r17]=r18; \
+ ;; \
+ srlz.i; \
+ ;; \
+ add r17 =1,r17; \
+ ;; \
+ br.cloop.sptk 1b; \
+ ;;
+
+
+/*
+ * void vmm_trampoline( context_t * from,
+ * context_t * to)
+ *
+ * from: r32
+ * to: r33
+ * note: interrupts must be disabled before calling this function.
+ */
+GLOBAL_ENTRY(vmm_trampoline)
+ mov r16 = psr
+ adds r2 = CTX(PSR), r32
+ ;;
+ st8 [r2] = r16, 8 // psr
+ mov r17 = pr
+ ;;
+ st8 [r2] = r17, 8 // pr
+ mov r18 = ar.unat
+ ;;
+ st8 [r2] = r18
+ mov r17 = ar.rsc
+ ;;
+ adds r2 = CTX(RSC),r32
+ ;;
+ st8 [r2]= r17
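+	// Put the RSE into enforced lazy mode, then flush all dirty
+	// stacked registers to the backing store before switching.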
+ mov ar.rsc =0
+ flushrs
+ ;;
+ SAVE_GENERAL_REGS
+ ;;
+ SAVE_KERNEL_REGS
+ ;;
+ SAVE_APP_REGS
+ ;;
+ SAVE_BRANCH_REGS
+ ;;
+ SAVE_CTL_REGS
+ ;;
+ SAVE_REGION_REGS
+ ;;
+ //SAVE_DEBUG_REGS
+ ;;
+ rsm psr.dfl
+ ;;
+ srlz.d
+ ;;
+ SAVE_FPU_LOW
+ ;;
+ rsm psr.dfh
+ ;;
+ srlz.d
+ ;;
+ SAVE_FPU_HIGH
+ ;;
+ SAVE_PTK_REGS
+ ;;
+ RESTORE_PTK_REGS
+ ;;
+ RESTORE_FPU_HIGH
+ ;;
+ RESTORE_FPU_LOW
+ ;;
+ //RESTORE_DEBUG_REGS
+ ;;
+ RESTORE_REGION_REGS
+ ;;
+ RESTORE_CTL_REGS
+ ;;
+ RESTORE_BRANCH_REGS
+ ;;
+ RESTORE_APP_REGS
+ ;;
+ RESTORE_KERNEL_REGS
+ ;;
+ RESTORE_GENERAL_REGS
+ ;;
+ adds r2=CTX(PSR), r33
+ ;;
+ ld8 r16=[r2], 8 // psr
+ ;;
+ mov psr.l=r16
+ ;;
+ srlz.d
+ ;;
+ ld8 r16=[r2], 8 // pr
+ ;;
+ mov pr =r16,-1
+ ld8 r16=[r2] // unat
+ ;;
+ mov ar.unat=r16
+ ;;
+ adds r2=CTX(RSC),r33
+ ;;
+ ld8 r16 =[r2]
+ ;;
+ mov ar.rsc = r16
+ ;;
+ br.ret.sptk.few b0
+END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
new file mode 100644
index 000000000000..e44027ce5667
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.c
@@ -0,0 +1,2163 @@
+/*
+ * vcpu.c: handling all virtual cpu related things.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Shaofan Li (Susue Li) <susie.li@intel.com>
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+#include <asm/ia64regs.h>
+#include <asm/gcc_intrin.h>
+#include <asm/kregs.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+
+/*
+ * Special notes:
+ * - Index by it/dt/rt sequence
+ * - Only existing mode transitions are allowed in this table
+ *  - RSE is placed in lazy mode when emulating guest partial mode
+ *  - If the gva happens to fall in rr0 or rr4, the only allowed case is
+ *    identity mapping (gva == gpa); otherwise panic! (How?)
+ */
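+/* Indexed by MODE_IND(psr), which packs the it/dt/rt bits in that order. */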
+int mm_switch_table[8][8] = {
+ /* 2004/09/12(Kevin): Allow switch to self */
+ /*
+ * (it,dt,rt): (0,0,0) -> (1,1,1)
+ * This kind of transition usually occurs in the very early
+ * stage of Linux boot up procedure. Another case is in efi
+ * and pal calls. (see "arch/ia64/kernel/head.S")
+ *
+ * (it,dt,rt): (0,0,0) -> (0,1,1)
+ * This kind of transition is found when OSYa exits efi boot
+ * service. Due to gva = gpa in this case (Same region),
+ * data access can be satisfied though itlb entry for physical
+ * emulation is hit.
+ */
+ {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /*
+ * (it,dt,rt): (0,1,1) -> (1,1,1)
+ * This kind of transition is found in OSYa.
+ *
+ * (it,dt,rt): (0,1,1) -> (0,0,0)
+ * This kind of transition is found in OSYa
+ */
+ {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
+ /* (1,0,0)->(1,1,1) */
+ {0, 0, 0, 0, 0, 0, 0, SW_P2V},
+ /*
+ * (it,dt,rt): (1,0,1) -> (1,1,1)
+ * This kind of transition usually occurs when Linux returns
+ * from the low level TLB miss handlers.
+ * (see "arch/ia64/kernel/ivt.S")
+ */
+ {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /*
+ * (it,dt,rt): (1,1,1) -> (1,0,1)
+ * This kind of transition usually occurs in Linux low level
+ * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
+ *
+ * (it,dt,rt): (1,1,1) -> (0,0,0)
+ * This kind of transition usually occurs in pal and efi calls,
+ * which requires running in physical mode.
+ * (see "arch/ia64/kernel/head.S")
+ * (1,1,1)->(1,0,0)
+ */
+
+ {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
+};
+
+void physical_mode_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.mode_flags = GUEST_IN_PHY;
+}
+
+void switch_to_physical_rid(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+ /* Save original virtual mode rr[0] and rr[4] */
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
+ ia64_srlz_d();
+
+ ia64_set_psr(psr);
+ return;
+}
+
+
+void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
+ ia64_srlz_d();
+ ia64_set_psr(psr);
+ return;
+}
+
+static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
+{
+ return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
+}
+
+void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
+ struct ia64_psr new_psr)
+{
+ int act;
+ act = mm_switch_action(old_psr, new_psr);
+ switch (act) {
+ case SW_V2P:
+ /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
+ old_psr.val, new_psr.val);*/
+ switch_to_physical_rid(vcpu);
+		/*
+		 * Set the RSE to enforced lazy mode to prevent active RSE
+		 * save/restore while in guest physical mode.
+		 */
+ vcpu->arch.mode_flags |= GUEST_IN_PHY;
+ break;
+ case SW_P2V:
+ switch_to_virtual_rid(vcpu);
+ /*
+		 * Recover the old mode, which was saved when entering
+		 * guest physical mode.
+ */
+ vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
+ break;
+ case SW_SELF:
+ break;
+ case SW_NOP:
+ break;
+ default:
+ /* Sanity check */
+ break;
+ }
+ return;
+}
+
+
+
+void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
+ struct ia64_psr new_psr)
+{
+
+ if ((old_psr.dt != new_psr.dt)
+ || (old_psr.it != new_psr.it)
+ || (old_psr.rt != new_psr.rt))
+ switch_mm_mode(vcpu, old_psr, new_psr);
+
+ return;
+}
+
+
+/*
+ * In physical mode, insert tc/tr for region 0 and 4 uses
+ * RID[0] and RID[4] which is for physical mode emulation.
+ * However what those inserted tc/tr wants is rid for
+ * virtual mode. So original virtual rid needs to be restored
+ * before insert.
+ *
+ * Operations which required such switch include:
+ * - insertions (itc.*, itr.*)
+ * - purges (ptc.* and ptr.*)
+ * - tpa
+ * - tak
+ * - thash?, ttag?
+ * All above needs actual virtual rid for destination entry.
+ */
+
+void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+ if (is_physical_mode(vcpu)) {
+ vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
+ switch_to_virtual_rid(vcpu);
+ }
+ return;
+}
+
+/* Recover always follows prepare */
+void recover_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+ if (is_physical_mode(vcpu))
+ switch_to_physical_rid(vcpu);
+ vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
+ return;
+}
+
+#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
+
+static u16 gr_info[32] = {
+ 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
+ RPT(r1), RPT(r2), RPT(r3),
+ RPT(r4), RPT(r5), RPT(r6), RPT(r7),
+ RPT(r8), RPT(r9), RPT(r10), RPT(r11),
+ RPT(r12), RPT(r13), RPT(r14), RPT(r15),
+ RPT(r16), RPT(r17), RPT(r18), RPT(r19),
+ RPT(r20), RPT(r21), RPT(r22), RPT(r23),
+ RPT(r24), RPT(r25), RPT(r26), RPT(r27),
+ RPT(r28), RPT(r29), RPT(r30), RPT(r31)
+};
+
+#define IA64_FIRST_STACKED_GR 32
+#define IA64_FIRST_ROTATING_FR 32
+
+static inline unsigned long
+rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
+{
+ reg += rrb;
+ if (reg >= sor)
+ reg -= sor;
+ return reg;
+}
+
+/*
+ * Return the (rotated) index for floating point register REGNUM
+ * (REGNUM must be in the range 32-127; the result is in the
+ * range 0-95).
+ */
+static inline unsigned long fph_index(struct kvm_pt_regs *regs,
+ long regnum)
+{
+ unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
+ return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
+}
+
+
+/*
+ * Given a backing store address, compute the address NUM_REGS
+ * register slots away, skipping any intervening RNAT slots.
+ */
+static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
+ long num_regs)
+{
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+ int i = 0;
+
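+	/*
+	 * Slot 63 of every 64-slot group in the backing store holds an
+	 * RNAT collection rather than a register, so crossing a group
+	 * boundary costs one extra slot (0x3f registers per group).
+	 */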
+ if (num_regs < 0)
+ delta -= 0x3e;
+ if (delta < 0) {
+ while (delta <= -0x3f) {
+ i--;
+ delta += 0x3f;
+ }
+ } else {
+ while (delta >= 0x3f) {
+ i++;
+ delta -= 0x3f;
+ }
+ }
+
+ return addr + num_regs + i;
+}
+
+static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+ unsigned long *val, int *nat)
+{
+ unsigned long *bsp, *addr, *rnat_addr, *bspstore;
+ unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+ unsigned long nat_mask;
+ unsigned long old_rsc, new_rsc;
+ long sof = (regs->cr_ifs) & 0x7f;
+ long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+ long ridx = r1 - 32;
+
+ if (ridx < sor)
+ ridx = rotate_reg(sor, rrb_gr, ridx);
+
+ old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+ new_rsc = old_rsc&(~(0x3));
+ ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
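+	/* loadrs>>16 is a byte count; the extra >>3 converts bytes to slots. */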
+ bsp = kbs + (regs->loadrs >> 19);
+
+ addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+ nat_mask = 1UL << ia64_rse_slot_num(addr);
+ rnat_addr = ia64_rse_rnat_addr(addr);
+
+ if (addr >= bspstore) {
+ ia64_flushrs();
+ ia64_mf();
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ }
+ *val = *addr;
+ if (nat) {
+ if (bspstore < rnat_addr)
+ *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
+ & nat_mask);
+ else
+ *nat = (int)!!((*rnat_addr) & nat_mask);
+ ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+ }
+}
+
+void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+ unsigned long val, unsigned long nat)
+{
+ unsigned long *bsp, *bspstore, *addr, *rnat_addr;
+ unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+ unsigned long nat_mask;
+ unsigned long old_rsc, new_rsc, psr;
+ unsigned long rnat;
+ long sof = (regs->cr_ifs) & 0x7f;
+ long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+ long ridx = r1 - 32;
+
+ if (ridx < sor)
+ ridx = rotate_reg(sor, rrb_gr, ridx);
+
+ old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+	/* put RSC into enforced lazy mode and set loadrs to 0 */
+ new_rsc = old_rsc & (~0x3fff0003);
+ ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+ bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
+
+ addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+ nat_mask = 1UL << ia64_rse_slot_num(addr);
+ rnat_addr = ia64_rse_rnat_addr(addr);
+
+ local_irq_save(psr);
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ if (addr >= bspstore) {
+
+ ia64_flushrs();
+ ia64_mf();
+ *addr = val;
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+ if (bspstore < rnat_addr)
+ rnat = rnat & (~nat_mask);
+ else
+ *rnat_addr = (*rnat_addr)&(~nat_mask);
+
+ ia64_mf();
+ ia64_loadrs();
+ ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+ } else {
+ rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+ *addr = val;
+ if (bspstore < rnat_addr)
+ rnat = rnat&(~nat_mask);
+ else
+ *rnat_addr = (*rnat_addr) & (~nat_mask);
+
+ ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+ ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+ }
+ local_irq_restore(psr);
+ ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+}
+
+void getreg(unsigned long regnum, unsigned long *val,
+ int *nat, struct kvm_pt_regs *regs)
+{
+ unsigned long addr, *unat;
+ if (regnum >= IA64_FIRST_STACKED_GR) {
+ get_rse_reg(regs, regnum, val, nat);
+ return;
+ }
+
+ /*
+ * Now look at registers in [0-31] range and init correct UNAT
+ */
+ addr = (unsigned long)regs;
+	unat = &regs->eml_unat;
+
+ addr += gr_info[regnum];
+
+ *val = *(unsigned long *)addr;
+ /*
+ * do it only when requested
+ */
+ if (nat)
+ *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
+}
+
+void setreg(unsigned long regnum, unsigned long val,
+ int nat, struct kvm_pt_regs *regs)
+{
+ unsigned long addr;
+ unsigned long bitmask;
+ unsigned long *unat;
+
+ /*
+ * First takes care of stacked registers
+ */
+ if (regnum >= IA64_FIRST_STACKED_GR) {
+ set_rse_reg(regs, regnum, val, nat);
+ return;
+ }
+
+ /*
+ * Now look at registers in [0-31] range and init correct UNAT
+ */
+ addr = (unsigned long)regs;
+ unat = &regs->eml_unat;
+ /*
+ * add offset from base of struct
+ * and do it !
+ */
+ addr += gr_info[regnum];
+
+ *(unsigned long *)addr = val;
+
+ /*
+	 * We need to update the corresponding UNAT bit to fully emulate
+	 * the load: UNAT bit_pos = GR[r3]{8:3} (from EAS-2.4).
+ */
+ bitmask = 1UL << ((addr >> 3) & 0x3f);
+ if (nat)
+ *unat |= bitmask;
+ else
+ *unat &= ~bitmask;
+
+}
+
+u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ u64 val;
+
+ if (!reg)
+ return 0;
+ getreg(reg, &val, 0, regs);
+ return val;
+}
+
+void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ long sof = (regs->cr_ifs) & 0x7f;
+
+ if (!reg)
+ return;
+ if (reg >= sof + 32)
+ return;
+	setreg(reg, value, nat, regs); /* FIXME: handle NATs later */
+}
+
+void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+ struct kvm_pt_regs *regs)
+{
+	/* Take floating register rotation into consideration */
+ if (regnum >= IA64_FIRST_ROTATING_FR)
+ regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+#define CASE_FIXED_FP(reg) \
+ case (reg) : \
+ ia64_stf_spill(fpval, reg); \
+ break
+
+ switch (regnum) {
+ CASE_FIXED_FP(0);
+ CASE_FIXED_FP(1);
+ CASE_FIXED_FP(2);
+ CASE_FIXED_FP(3);
+ CASE_FIXED_FP(4);
+ CASE_FIXED_FP(5);
+
+ CASE_FIXED_FP(6);
+ CASE_FIXED_FP(7);
+ CASE_FIXED_FP(8);
+ CASE_FIXED_FP(9);
+ CASE_FIXED_FP(10);
+ CASE_FIXED_FP(11);
+
+ CASE_FIXED_FP(12);
+ CASE_FIXED_FP(13);
+ CASE_FIXED_FP(14);
+ CASE_FIXED_FP(15);
+ CASE_FIXED_FP(16);
+ CASE_FIXED_FP(17);
+ CASE_FIXED_FP(18);
+ CASE_FIXED_FP(19);
+ CASE_FIXED_FP(20);
+ CASE_FIXED_FP(21);
+ CASE_FIXED_FP(22);
+ CASE_FIXED_FP(23);
+ CASE_FIXED_FP(24);
+ CASE_FIXED_FP(25);
+ CASE_FIXED_FP(26);
+ CASE_FIXED_FP(27);
+ CASE_FIXED_FP(28);
+ CASE_FIXED_FP(29);
+ CASE_FIXED_FP(30);
+ CASE_FIXED_FP(31);
+ CASE_FIXED_FP(32);
+ CASE_FIXED_FP(33);
+ CASE_FIXED_FP(34);
+ CASE_FIXED_FP(35);
+ CASE_FIXED_FP(36);
+ CASE_FIXED_FP(37);
+ CASE_FIXED_FP(38);
+ CASE_FIXED_FP(39);
+ CASE_FIXED_FP(40);
+ CASE_FIXED_FP(41);
+ CASE_FIXED_FP(42);
+ CASE_FIXED_FP(43);
+ CASE_FIXED_FP(44);
+ CASE_FIXED_FP(45);
+ CASE_FIXED_FP(46);
+ CASE_FIXED_FP(47);
+ CASE_FIXED_FP(48);
+ CASE_FIXED_FP(49);
+ CASE_FIXED_FP(50);
+ CASE_FIXED_FP(51);
+ CASE_FIXED_FP(52);
+ CASE_FIXED_FP(53);
+ CASE_FIXED_FP(54);
+ CASE_FIXED_FP(55);
+ CASE_FIXED_FP(56);
+ CASE_FIXED_FP(57);
+ CASE_FIXED_FP(58);
+ CASE_FIXED_FP(59);
+ CASE_FIXED_FP(60);
+ CASE_FIXED_FP(61);
+ CASE_FIXED_FP(62);
+ CASE_FIXED_FP(63);
+ CASE_FIXED_FP(64);
+ CASE_FIXED_FP(65);
+ CASE_FIXED_FP(66);
+ CASE_FIXED_FP(67);
+ CASE_FIXED_FP(68);
+ CASE_FIXED_FP(69);
+ CASE_FIXED_FP(70);
+ CASE_FIXED_FP(71);
+ CASE_FIXED_FP(72);
+ CASE_FIXED_FP(73);
+ CASE_FIXED_FP(74);
+ CASE_FIXED_FP(75);
+ CASE_FIXED_FP(76);
+ CASE_FIXED_FP(77);
+ CASE_FIXED_FP(78);
+ CASE_FIXED_FP(79);
+ CASE_FIXED_FP(80);
+ CASE_FIXED_FP(81);
+ CASE_FIXED_FP(82);
+ CASE_FIXED_FP(83);
+ CASE_FIXED_FP(84);
+ CASE_FIXED_FP(85);
+ CASE_FIXED_FP(86);
+ CASE_FIXED_FP(87);
+ CASE_FIXED_FP(88);
+ CASE_FIXED_FP(89);
+ CASE_FIXED_FP(90);
+ CASE_FIXED_FP(91);
+ CASE_FIXED_FP(92);
+ CASE_FIXED_FP(93);
+ CASE_FIXED_FP(94);
+ CASE_FIXED_FP(95);
+ CASE_FIXED_FP(96);
+ CASE_FIXED_FP(97);
+ CASE_FIXED_FP(98);
+ CASE_FIXED_FP(99);
+ CASE_FIXED_FP(100);
+ CASE_FIXED_FP(101);
+ CASE_FIXED_FP(102);
+ CASE_FIXED_FP(103);
+ CASE_FIXED_FP(104);
+ CASE_FIXED_FP(105);
+ CASE_FIXED_FP(106);
+ CASE_FIXED_FP(107);
+ CASE_FIXED_FP(108);
+ CASE_FIXED_FP(109);
+ CASE_FIXED_FP(110);
+ CASE_FIXED_FP(111);
+ CASE_FIXED_FP(112);
+ CASE_FIXED_FP(113);
+ CASE_FIXED_FP(114);
+ CASE_FIXED_FP(115);
+ CASE_FIXED_FP(116);
+ CASE_FIXED_FP(117);
+ CASE_FIXED_FP(118);
+ CASE_FIXED_FP(119);
+ CASE_FIXED_FP(120);
+ CASE_FIXED_FP(121);
+ CASE_FIXED_FP(122);
+ CASE_FIXED_FP(123);
+ CASE_FIXED_FP(124);
+ CASE_FIXED_FP(125);
+ CASE_FIXED_FP(126);
+ CASE_FIXED_FP(127);
+ }
+#undef CASE_FIXED_FP
+}
+
+void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+ struct kvm_pt_regs *regs)
+{
+	/* Take floating register rotation into consideration */
+ if (regnum >= IA64_FIRST_ROTATING_FR)
+ regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+
+#define CASE_FIXED_FP(reg) \
+ case (reg) : \
+ ia64_ldf_fill(reg, fpval); \
+ break
+
+ switch (regnum) {
+ CASE_FIXED_FP(2);
+ CASE_FIXED_FP(3);
+ CASE_FIXED_FP(4);
+ CASE_FIXED_FP(5);
+
+ CASE_FIXED_FP(6);
+ CASE_FIXED_FP(7);
+ CASE_FIXED_FP(8);
+ CASE_FIXED_FP(9);
+ CASE_FIXED_FP(10);
+ CASE_FIXED_FP(11);
+
+ CASE_FIXED_FP(12);
+ CASE_FIXED_FP(13);
+ CASE_FIXED_FP(14);
+ CASE_FIXED_FP(15);
+ CASE_FIXED_FP(16);
+ CASE_FIXED_FP(17);
+ CASE_FIXED_FP(18);
+ CASE_FIXED_FP(19);
+ CASE_FIXED_FP(20);
+ CASE_FIXED_FP(21);
+ CASE_FIXED_FP(22);
+ CASE_FIXED_FP(23);
+ CASE_FIXED_FP(24);
+ CASE_FIXED_FP(25);
+ CASE_FIXED_FP(26);
+ CASE_FIXED_FP(27);
+ CASE_FIXED_FP(28);
+ CASE_FIXED_FP(29);
+ CASE_FIXED_FP(30);
+ CASE_FIXED_FP(31);
+ CASE_FIXED_FP(32);
+ CASE_FIXED_FP(33);
+ CASE_FIXED_FP(34);
+ CASE_FIXED_FP(35);
+ CASE_FIXED_FP(36);
+ CASE_FIXED_FP(37);
+ CASE_FIXED_FP(38);
+ CASE_FIXED_FP(39);
+ CASE_FIXED_FP(40);
+ CASE_FIXED_FP(41);
+ CASE_FIXED_FP(42);
+ CASE_FIXED_FP(43);
+ CASE_FIXED_FP(44);
+ CASE_FIXED_FP(45);
+ CASE_FIXED_FP(46);
+ CASE_FIXED_FP(47);
+ CASE_FIXED_FP(48);
+ CASE_FIXED_FP(49);
+ CASE_FIXED_FP(50);
+ CASE_FIXED_FP(51);
+ CASE_FIXED_FP(52);
+ CASE_FIXED_FP(53);
+ CASE_FIXED_FP(54);
+ CASE_FIXED_FP(55);
+ CASE_FIXED_FP(56);
+ CASE_FIXED_FP(57);
+ CASE_FIXED_FP(58);
+ CASE_FIXED_FP(59);
+ CASE_FIXED_FP(60);
+ CASE_FIXED_FP(61);
+ CASE_FIXED_FP(62);
+ CASE_FIXED_FP(63);
+ CASE_FIXED_FP(64);
+ CASE_FIXED_FP(65);
+ CASE_FIXED_FP(66);
+ CASE_FIXED_FP(67);
+ CASE_FIXED_FP(68);
+ CASE_FIXED_FP(69);
+ CASE_FIXED_FP(70);
+ CASE_FIXED_FP(71);
+ CASE_FIXED_FP(72);
+ CASE_FIXED_FP(73);
+ CASE_FIXED_FP(74);
+ CASE_FIXED_FP(75);
+ CASE_FIXED_FP(76);
+ CASE_FIXED_FP(77);
+ CASE_FIXED_FP(78);
+ CASE_FIXED_FP(79);
+ CASE_FIXED_FP(80);
+ CASE_FIXED_FP(81);
+ CASE_FIXED_FP(82);
+ CASE_FIXED_FP(83);
+ CASE_FIXED_FP(84);
+ CASE_FIXED_FP(85);
+ CASE_FIXED_FP(86);
+ CASE_FIXED_FP(87);
+ CASE_FIXED_FP(88);
+ CASE_FIXED_FP(89);
+ CASE_FIXED_FP(90);
+ CASE_FIXED_FP(91);
+ CASE_FIXED_FP(92);
+ CASE_FIXED_FP(93);
+ CASE_FIXED_FP(94);
+ CASE_FIXED_FP(95);
+ CASE_FIXED_FP(96);
+ CASE_FIXED_FP(97);
+ CASE_FIXED_FP(98);
+ CASE_FIXED_FP(99);
+ CASE_FIXED_FP(100);
+ CASE_FIXED_FP(101);
+ CASE_FIXED_FP(102);
+ CASE_FIXED_FP(103);
+ CASE_FIXED_FP(104);
+ CASE_FIXED_FP(105);
+ CASE_FIXED_FP(106);
+ CASE_FIXED_FP(107);
+ CASE_FIXED_FP(108);
+ CASE_FIXED_FP(109);
+ CASE_FIXED_FP(110);
+ CASE_FIXED_FP(111);
+ CASE_FIXED_FP(112);
+ CASE_FIXED_FP(113);
+ CASE_FIXED_FP(114);
+ CASE_FIXED_FP(115);
+ CASE_FIXED_FP(116);
+ CASE_FIXED_FP(117);
+ CASE_FIXED_FP(118);
+ CASE_FIXED_FP(119);
+ CASE_FIXED_FP(120);
+ CASE_FIXED_FP(121);
+ CASE_FIXED_FP(122);
+ CASE_FIXED_FP(123);
+ CASE_FIXED_FP(124);
+ CASE_FIXED_FP(125);
+ CASE_FIXED_FP(126);
+ CASE_FIXED_FP(127);
+ }
+}
+
+void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+ struct ia64_fpreg *val)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+	getfpreg(reg, val, regs); /* FIXME: handle NATs later */
+}
+
+void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+ struct ia64_fpreg *val)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ if (reg > 1)
+		setfpreg(reg, val, regs); /* FIXME: handle NATs later */
+}
+
+/************************************************************************
+ * lsapic timer
+ ***********************************************************************/
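+/*
+ * The guest ITC is the host ITC plus a per-vcpu offset; last_itc keeps
+ * the returned value monotonic across reads even when the offset changes.
+ */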
+u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
+{
+ unsigned long guest_itc;
+ guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+
+ if (guest_itc >= VMX(vcpu, last_itc)) {
+ VMX(vcpu, last_itc) = guest_itc;
+ return guest_itc;
+ } else
+ return VMX(vcpu, last_itc);
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
+static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct kvm_vcpu *v;
+ int i;
+ long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+ unsigned long vitv = VCPU(vcpu, itv);
+
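+	/*
+	 * A write by vcpu 0 sets the ITC offset for every vcpu of the
+	 * VM; the vcpu structures are laid out contiguously, VCPU_SIZE
+	 * apart, which the pointer arithmetic below relies on.
+	 */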
+ if (vcpu->vcpu_id == 0) {
+ for (i = 0; i < MAX_VCPU_NUM; i++) {
+ v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+ VMX(v, itc_offset) = itc_offset;
+ VMX(v, last_itc) = 0;
+ }
+ }
+ VMX(vcpu, last_itc) = 0;
+ if (VCPU(vcpu, itm) <= val) {
+ VMX(vcpu, itc_check) = 0;
+ vcpu_unpend_interrupt(vcpu, vitv);
+ } else {
+ VMX(vcpu, itc_check) = 1;
+ vcpu_set_itm(vcpu, VCPU(vcpu, itm));
+ }
+}
+
+static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, itm));
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
+{
+ unsigned long vitv = VCPU(vcpu, itv);
+ VCPU(vcpu, itm) = val;
+
+ if (val > vcpu_get_itc(vcpu)) {
+ VMX(vcpu, itc_check) = 1;
+ vcpu_unpend_interrupt(vcpu, vitv);
+ VMX(vcpu, timer_pending) = 0;
+ } else
+ VMX(vcpu, itc_check) = 0;
+}
+
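+/* ITV layout: vector number in bits 7:0, interrupt mask bit in bit 16 */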
+#define ITV_VECTOR(itv) (itv&0xff)
+#define ITV_IRQ_MASK(itv) (itv&(1<<16))
+
+static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, itv) = val;
+ if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
+ vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+ vcpu->arch.timer_pending = 0;
+ }
+}
+
+static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
+{
+ int vec;
+
+ vec = highest_inservice_irq(vcpu);
+ if (vec == NULL_VECTOR)
+ return;
+ VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
+ VCPU(vcpu, eoi) = 0;
+ vcpu->arch.irq_new_pending = 1;
+}
+
+/* See Table 5-8 in SDM vol2 for the definition */
+int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
+{
+ union ia64_tpr vtpr;
+
+ vtpr.val = VCPU(vcpu, tpr);
+
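+	/*
+	 * TPR.mmi masks all maskable interrupts; TPR.mic masks every
+	 * priority class (vector >> 4) up to and including its value.
+	 */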
+ if (h_inservice == NMI_VECTOR)
+ return IRQ_MASKED_BY_INSVC;
+
+ if (h_pending == NMI_VECTOR) {
+ /* Non Maskable Interrupt */
+ return IRQ_NO_MASKED;
+ }
+
+ if (h_inservice == ExtINT_VECTOR)
+ return IRQ_MASKED_BY_INSVC;
+
+ if (h_pending == ExtINT_VECTOR) {
+ if (vtpr.mmi) {
+ /* mask all external IRQ */
+ return IRQ_MASKED_BY_VTPR;
+ } else
+ return IRQ_NO_MASKED;
+ }
+
+ if (is_higher_irq(h_pending, h_inservice)) {
+ if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
+ return IRQ_NO_MASKED;
+ else
+ return IRQ_MASKED_BY_VTPR;
+ } else {
+ return IRQ_MASKED_BY_INSVC;
+ }
+}
+
+void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+ long spsr;
+ int ret;
+
+ local_irq_save(spsr);
+ ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
+ local_irq_restore(spsr);
+
+ vcpu->arch.irq_new_pending = 1;
+}
+
+void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+ long spsr;
+ int ret;
+
+ local_irq_save(spsr);
+ ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
+ local_irq_restore(spsr);
+ if (ret) {
+ vcpu->arch.irq_new_pending = 1;
+ wmb();
+ }
+}
+
+void update_vhpi(struct kvm_vcpu *vcpu, int vec)
+{
+ u64 vhpi;
+
+ if (vec == NULL_VECTOR)
+ vhpi = 0;
+ else if (vec == NMI_VECTOR)
+ vhpi = 32;
+ else if (vec == ExtINT_VECTOR)
+ vhpi = 16;
+ else
+ vhpi = vec >> 4;
+
+ VCPU(vcpu, vhpi) = vhpi;
+ if (VCPU(vcpu, vac).a_int)
+ ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
+ (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
+}
+
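+/*
+ * Reading IVR delivers the highest-priority pending, unmasked interrupt:
+ * the vector is moved from IRR into the in-service set and returned.
+ * If nothing is deliverable, the spurious vector is returned instead.
+ */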
+u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
+{
+ int vec, h_inservice, mask;
+
+ vec = highest_pending_irq(vcpu);
+ h_inservice = highest_inservice_irq(vcpu);
+ mask = irq_masked(vcpu, vec, h_inservice);
+ if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
+ if (VCPU(vcpu, vhpi))
+ update_vhpi(vcpu, NULL_VECTOR);
+ return IA64_SPURIOUS_INT_VECTOR;
+ }
+ if (mask == IRQ_MASKED_BY_VTPR) {
+ update_vhpi(vcpu, vec);
+ return IA64_SPURIOUS_INT_VECTOR;
+ }
+ VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
+ vcpu_unpend_interrupt(vcpu, vec);
+ return (u64)vec;
+}
+
+/**************************************************************************
+ Privileged operation emulation routines
+ **************************************************************************/
+u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ union ia64_pta vpta;
+ union ia64_rr vrr;
+ u64 pval;
+ u64 vhpt_offset;
+
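+	/*
+	 * Short-format hash: index the 2^pta.size table by the virtual
+	 * page number, 8 bytes per entry, within the faulting address's
+	 * region.  The long format (pta.vf) delegates the hash to the
+	 * PAL/VSA service below.
+	 */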
+ vpta.val = vcpu_get_pta(vcpu);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
+ if (vpta.vf) {
+ pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
+ vpta.val, 0, 0, 0, 0);
+ } else {
+ pval = (vadr & VRN_MASK) | vhpt_offset |
+ (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
+ }
+ return pval;
+}
+
+u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ union ia64_rr vrr;
+ union ia64_pta vpta;
+ u64 pval;
+
+ vpta.val = vcpu_get_pta(vcpu);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ if (vpta.vf) {
+ pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
+ 0, 0, 0, 0, 0);
+ } else
+ pval = 1;
+
+ return pval;
+}
+
+u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ struct thash_data *data;
+ union ia64_pta vpta;
+ u64 key;
+
+ vpta.val = vcpu_get_pta(vcpu);
+ if (vpta.vf == 0) {
+ key = 1;
+ return key;
+ }
+ data = vtlb_lookup(vcpu, vadr, D_TLB);
+ if (!data || !data->p)
+ key = 1;
+ else
+ key = data->key;
+
+ return key;
+}
+
+void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long thash, vadr;
+
+ vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+ thash = vcpu_thash(vcpu, vadr);
+ vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
+}
+
+void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long tag, vadr;
+
+ vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+ tag = vcpu_ttag(vcpu, vadr);
+ vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
+}
+
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+{
+ struct thash_data *data;
+ union ia64_isr visr, pt_isr;
+ struct kvm_pt_regs *regs;
+ struct ia64_psr vpsr;
+
+ regs = vcpu_regs(vcpu);
+ pt_isr.val = VMX(vcpu, cr_isr);
+ visr.val = 0;
+ visr.ei = pt_isr.ei;
+ visr.ir = pt_isr.ir;
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ visr.na = 1;
+
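+	/*
+	 * Look up the host VHPT first, then the guest VTLB; if neither
+	 * has a translation, deliver the appropriate TLB/VHPT fault to
+	 * the guest.
+	 */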
+ data = vhpt_lookup(vadr);
+ if (data) {
+ if (data->p == 0) {
+ vcpu_set_isr(vcpu, visr.val);
+ data_page_not_present(vcpu, vadr);
+ return IA64_FAULT;
+ } else if (data->ma == VA_MATTR_NATPAGE) {
+ vcpu_set_isr(vcpu, visr.val);
+ dnat_page_consumption(vcpu, vadr);
+ return IA64_FAULT;
+ } else {
+ *padr = (data->gpaddr >> data->ps << data->ps) |
+ (vadr & (PSIZE(data->ps) - 1));
+ return IA64_NO_FAULT;
+ }
+ }
+
+ data = vtlb_lookup(vcpu, vadr, D_TLB);
+ if (data) {
+ if (data->p == 0) {
+ vcpu_set_isr(vcpu, visr.val);
+ data_page_not_present(vcpu, vadr);
+ return IA64_FAULT;
+ } else if (data->ma == VA_MATTR_NATPAGE) {
+ vcpu_set_isr(vcpu, visr.val);
+ dnat_page_consumption(vcpu, vadr);
+ return IA64_FAULT;
+ } else{
+ *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
+ | (vadr & (PSIZE(data->ps) - 1));
+ return IA64_NO_FAULT;
+ }
+ }
+ if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
+ if (vpsr.ic) {
+ vcpu_set_isr(vcpu, visr.val);
+ alt_dtlb(vcpu, vadr);
+ return IA64_FAULT;
+ } else {
+ nested_dtlb(vcpu);
+ return IA64_FAULT;
+ }
+ } else {
+ if (vpsr.ic) {
+ vcpu_set_isr(vcpu, visr.val);
+ dvhpt_fault(vcpu, vadr);
+ return IA64_FAULT;
+ } else{
+ nested_dtlb(vcpu);
+ return IA64_FAULT;
+ }
+ }
+
+ return IA64_NO_FAULT;
+}
+
+int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1, r3;
+
+ r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+
+ if (vcpu_tpa(vcpu, r3, &r1))
+ return IA64_FAULT;
+
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ return(IA64_NO_FAULT);
+}
+
+void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1, r3;
+
+ r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+ r1 = vcpu_tak(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+}
+
+/************************************
+ * Insert/Purge translation register/cache
+ ************************************/
+void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+ thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
+}
+
+void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+ thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
+}
+
+void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+ u64 ps, va, rid;
+ struct thash_data *p_itr;
+
+ ps = itir_ps(itir);
+ va = PAGEALIGN(ifa, ps);
+ pte &= ~PAGE_FLAGS_RV_MASK;
+ rid = vcpu_get_rr(vcpu, ifa);
+ rid = rid & RR_RID_MASK;
+ p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
+ vcpu_set_tr(p_itr, pte, itir, va, rid);
+ vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
+}
+
+void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+ u64 gpfn;
+ u64 ps, va, rid;
+ struct thash_data *p_dtr;
+
+ ps = itir_ps(itir);
+ va = PAGEALIGN(ifa, ps);
+ pte &= ~PAGE_FLAGS_RV_MASK;
+
+ if (ps != _PAGE_SIZE_16M)
+ thash_purge_entries(vcpu, va, ps);
+ gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+ if (__gpfn_is_io(gpfn))
+ pte |= VTLB_PTE_IO;
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
+ vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
+ pte, itir, va, rid);
+ vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
+}
+
+void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+ int index;
+ u64 va;
+
+ va = PAGEALIGN(ifa, ps);
+ while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
+ vcpu->arch.dtrs[index].page_flags = 0;
+
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+ int index;
+ u64 va;
+
+ va = PAGEALIGN(ifa, ps);
+ while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
+ vcpu->arch.itrs[index].page_flags = 0;
+
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ va = PAGEALIGN(va, ps);
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
+{
+ thash_purge_all(vcpu);
+}
+
+void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ long psr;
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_PTC_G;
+
+ p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
+ p->u.ptc_g_data.vaddr = va;
+ p->u.ptc_g_data.ps = ps;
+ vmm_transition(vcpu);
+	/* Do the local purge here */
+ vcpu_ptc_l(vcpu, va, ps);
+ local_irq_restore(psr);
+}
+
+void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ vcpu_ptc_ga(vcpu, va, ps);
+}
+
+void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ vcpu_ptc_e(vcpu, ifa);
+}
+
+void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte, slot;
+
+ slot = vcpu_get_gr(vcpu, inst.M45.r3);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ vcpu_itr_d(vcpu, slot, pte, itir, ifa);
+}
+
+void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte, slot;
+
+ slot = vcpu_get_gr(vcpu, inst.M45.r3);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ vcpu_itr_i(vcpu, slot, pte, itir, ifa);
+}
+
+void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte;
+
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_itc_d(vcpu, pte, itir, ifa);
+}
+
+void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte;
+
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_itc_i(vcpu, pte, itir, ifa);
+}
+
+/*************************************
+ * Moves to semi-privileged registers
+ *************************************/
+
+void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long imm;
+
+ if (inst.M30.s)
+ imm = -inst.M30.imm;
+ else
+ imm = inst.M30.imm;
+
+ vcpu_set_itc(vcpu, imm);
+}
+
+void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r2;
+
+ r2 = vcpu_get_gr(vcpu, inst.M29.r2);
+ vcpu_set_itc(vcpu, r2);
+}
+
+void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1;
+
+ r1 = vcpu_get_itc(vcpu);
+ vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
+}
+/**************************************************************************
+ struct kvm_vcpu*protection key register access routines
+ **************************************************************************/
+
+unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ return ((unsigned long)ia64_get_pkr(reg));
+}
+
+void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
+{
+ ia64_set_pkr(reg, val);
+}
+
+unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
+{
+ union ia64_rr rr, rr1;
+
+ rr.val = vcpu_get_rr(vcpu, ifa);
+ rr1.val = 0;
+ rr1.ps = rr.ps;
+ rr1.rid = rr.rid;
+ return (rr1.val);
+}
+
+/********************************
+ * Moves to privileged registers
+ ********************************/
+unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
+ unsigned long val)
+{
+ union ia64_rr oldrr, newrr;
+ unsigned long rrval;
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ unsigned long psr;
+
+ oldrr.val = vcpu_get_rr(vcpu, reg);
+ newrr.val = val;
+ vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
+
+ switch ((unsigned long)(reg >> VRN_SHIFT)) {
+ case VRN6:
+ vcpu->arch.vmm_rr = vrrtomrr(val);
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_SWITCH_RR6;
+ vmm_transition(vcpu);
+ local_irq_restore(psr);
+ break;
+ case VRN4:
+ rrval = vrrtomrr(val);
+ vcpu->arch.metaphysical_saved_rr4 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg, rrval);
+ break;
+ case VRN0:
+ rrval = vrrtomrr(val);
+ vcpu->arch.metaphysical_saved_rr0 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg, rrval);
+ break;
+ default:
+ ia64_set_rr(reg, vrrtomrr(val));
+ break;
+ }
+
+ return (IA64_NO_FAULT);
+}
+
+void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_rr(vcpu, r3, r2);
+}
+
+void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+}
+
+void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+}
+
+void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pmc(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pmd(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ u64 r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pkr(vcpu, r3, r2);
+}
+
+void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_rr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_pkr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_dbr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_ibr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_pmc(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ /* FIXME: This could get called as a result of a rsvd-reg fault */
+ if (reg > (ia64_get_cpuid(3) & 0xff))
+ return 0;
+ else
+ return ia64_get_cpuid(reg);
+}
+
+void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_cpuid(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ VCPU(vcpu, tpr) = val;
+ vcpu->arch.irq_check = 1;
+}
+
+unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r2;
+
+ r2 = vcpu_get_gr(vcpu, inst.M32.r2);
+ VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
+
+ switch (inst.M32.cr3) {
+ case 0:
+ vcpu_set_dcr(vcpu, r2);
+ break;
+ case 1:
+ vcpu_set_itm(vcpu, r2);
+ break;
+ case 66:
+ vcpu_set_tpr(vcpu, r2);
+ break;
+ case 67:
+ vcpu_set_eoi(vcpu, r2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long tgt = inst.M33.r1;
+ unsigned long val;
+
+ switch (inst.M33.cr3) {
+ case 65:
+ val = vcpu_get_ivr(vcpu);
+ vcpu_set_gr(vcpu, tgt, val, 0);
+ break;
+
+ case 67:
+ vcpu_set_gr(vcpu, tgt, 0L, 0);
+ break;
+ default:
+ val = VCPU(vcpu, vcr[inst.M33.cr3]);
+ vcpu_set_gr(vcpu, tgt, val, 0);
+ break;
+ }
+
+ return 0;
+}
+
+void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ unsigned long mask;
+ struct kvm_pt_regs *regs;
+ struct ia64_psr old_psr, new_psr;
+
+ old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+ regs = vcpu_regs(vcpu);
+	/* We only support guests with:
+	 *  vpsr.pk = 0
+	 *  vpsr.is = 0
+	 * Otherwise, panic the VM.
+	 */
+ if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
+ panic_vm(vcpu);
+
+	/*
+	 * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
+	 * instruction executes successfully, so never keep them set
+	 * in the virtual PSR.
+	 */
+ VCPU(vcpu, vpsr) = val
+ & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
+ IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
+
+ if (!old_psr.i && (val & IA64_PSR_I)) {
+ /* vpsr.i 0->1 */
+ vcpu->arch.irq_check = 1;
+ }
+ new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+	/*
+	 * All vIA64_PSR bits go to mPSR (v->tf->tf_special.psr),
+	 * except for the following bits:
+	 * ic/i/dt/si/rt/mc/it/bn/vm
+	 */
+ mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
+ IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
+ IA64_PSR_VM;
+
+ regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
+
+ check_mm_mode_switch(vcpu, old_psr, new_psr);
+}
+
+unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
+{
+ struct ia64_psr vpsr;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+ if (!vpsr.ic)
+ VCPU(vcpu, ifs) = regs->cr_ifs;
+ regs->cr_ifs = IA64_IFS_V;
+ return (IA64_NO_FAULT);
+}
+
+/**************************************************************************
+ VCPU banked general register access routines
+ **************************************************************************/
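+/*
+ * Swap the 16 UNAT bits that cover r16-r31 between the pt_regs UNAT
+ * (at VMM_PT_REGS_R16_SLOT) and the saved bank UNAT word, mirroring
+ * the register copy performed by the bsw emulation below.
+ */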
+#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+ do { \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 0, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 16, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
+ "r"(*runat), "r"(b1unat), "r"(runat), \
+ "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
+ } while (0)
+
+void vcpu_bsw0(struct kvm_vcpu *vcpu)
+{
+ unsigned long i;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ unsigned long *r = &regs->r16;
+ unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+ unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &VCPU(vcpu, vbnat);
+ unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+ if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
+ for (i = 0; i < 16; i++) {
+ *b1++ = *r;
+ *r++ = *b0++;
+ }
+ vcpu_bsw0_unat(i, b0unat, b1unat, runat,
+ VMM_PT_REGS_R16_SLOT);
+ VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
+ }
+}
+
+#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+ do { \
+ __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
+ "dep %1 = %0, %1, 16, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 0, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
+ "r"(*runat), "r"(b0unat), "r"(runat), \
+ "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
+ } while (0)
+
+void vcpu_bsw1(struct kvm_vcpu *vcpu)
+{
+ unsigned long i;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ unsigned long *r = &regs->r16;
+ unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+ unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &VCPU(vcpu, vbnat);
+ unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+ if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
+ for (i = 0; i < 16; i++) {
+ *b0++ = *r;
+ *r++ = *b1++;
+ }
+ vcpu_bsw1_unat(i, b0unat, b1unat, runat,
+ VMM_PT_REGS_R16_SLOT);
+ VCPU(vcpu, vpsr) |= IA64_PSR_BN;
+ }
+}
+
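+/*
+ * rfi emulation: reload the PSR and IIP from the interruption
+ * resources, switch to the register bank selected by ipsr.bn, and
+ * reinstate cr.ifs only if its valid bit (bit 63) is set.
+ */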
+void vcpu_rfi(struct kvm_vcpu *vcpu)
+{
+ unsigned long ifs, psr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ psr = VCPU(vcpu, ipsr);
+ if (psr & IA64_PSR_BN)
+ vcpu_bsw1(vcpu);
+ else
+ vcpu_bsw0(vcpu);
+ vcpu_set_psr(vcpu, psr);
+ ifs = VCPU(vcpu, ifs);
+ if (ifs >> 63)
+ regs->cr_ifs = ifs;
+ regs->cr_iip = VCPU(vcpu, iip);
+}
+
+/*
+ * VPSR cannot keep track of the bits of the guest PSR listed below;
+ * this function assembles the effective guest PSR from both sources.
+ */
+
+unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
+{
+ unsigned long mask;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+ IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
+ return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
+}
+
+void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long vpsr;
+ unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
+ | inst.M44.imm;
+
+ vpsr = vcpu_get_psr(vcpu);
+ vpsr &= (~imm24);
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long vpsr;
+ unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
+ | inst.M44.imm;
+
+ vpsr = vcpu_get_psr(vcpu);
+ vpsr |= imm24;
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+/* Generate Mask
+ * Parameter:
+ * bit -- starting bit
+ * len -- how many bits
+ */
+#define MASK(bit,len) \
+({ \
+ __u64 ret; \
+ \
+ __asm __volatile("dep %0=-1, r0, %1, %2"\
+ : "=r" (ret): \
+ "M" (bit), \
+ "M" (len)); \
+ ret; \
+})
+
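+/* mov to psr.l writes only PSR{31:0}; the upper half is preserved */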
+void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
+ vcpu_set_psr(vcpu, val);
+}
+
+void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long val;
+
+ val = vcpu_get_gr(vcpu, inst.M35.r2);
+ vcpu_set_psr_l(vcpu, val);
+}
+
+void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long val;
+
+ val = vcpu_get_psr(vcpu);
+ val = (val & MASK(0, 32)) | (val & MASK(35, 2));
+ vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
+}
+
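+/*
+ * Each 16-byte bundle holds three instruction slots; cr.ipsr.ri is the
+ * slot index (0..2) within the bundle at cr.iip.
+ */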
+void vcpu_increment_iip(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+ if (ipsr->ri == 2) {
+ ipsr->ri = 0;
+ regs->cr_iip += 16;
+ } else
+ ipsr->ri++;
+}
+
+void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+
+ if (ipsr->ri == 0) {
+ ipsr->ri = 2;
+ regs->cr_iip -= 16;
+ } else
+ ipsr->ri--;
+}
+
+/*
+ * Emulate a privileged operation.
+ *
+ * @vcpu: virtual cpu
+ * @cause: the reason for the virtualization fault
+ * @opcode: the instruction that caused the virtualization fault
+ */
+
+void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
+{
+ unsigned long status, cause, opcode ;
+ INST64 inst;
+
+ status = IA64_NO_FAULT;
+ cause = VMX(vcpu, cause);
+ opcode = VMX(vcpu, opcode);
+ inst.inst = opcode;
+ /*
+ * Switch to actual virtual rid in rr0 and rr4,
+ * which is required by some tlb related instructions.
+ */
+ prepare_if_physical_mode(vcpu);
+
+ switch (cause) {
+ case EVENT_RSM:
+ kvm_rsm(vcpu, inst);
+ break;
+ case EVENT_SSM:
+ kvm_ssm(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PSR:
+ kvm_mov_to_psr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PSR:
+ kvm_mov_from_psr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_CR:
+ kvm_mov_from_cr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_CR:
+ kvm_mov_to_cr(vcpu, inst);
+ break;
+ case EVENT_BSW_0:
+ vcpu_bsw0(vcpu);
+ break;
+ case EVENT_BSW_1:
+ vcpu_bsw1(vcpu);
+ break;
+ case EVENT_COVER:
+ vcpu_cover(vcpu);
+ break;
+ case EVENT_RFI:
+ vcpu_rfi(vcpu);
+ break;
+ case EVENT_ITR_D:
+ kvm_itr_d(vcpu, inst);
+ break;
+ case EVENT_ITR_I:
+ kvm_itr_i(vcpu, inst);
+ break;
+ case EVENT_PTR_D:
+ kvm_ptr_d(vcpu, inst);
+ break;
+ case EVENT_PTR_I:
+ kvm_ptr_i(vcpu, inst);
+ break;
+ case EVENT_ITC_D:
+ kvm_itc_d(vcpu, inst);
+ break;
+ case EVENT_ITC_I:
+ kvm_itc_i(vcpu, inst);
+ break;
+ case EVENT_PTC_L:
+ kvm_ptc_l(vcpu, inst);
+ break;
+ case EVENT_PTC_G:
+ kvm_ptc_g(vcpu, inst);
+ break;
+ case EVENT_PTC_GA:
+ kvm_ptc_ga(vcpu, inst);
+ break;
+ case EVENT_PTC_E:
+ kvm_ptc_e(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_RR:
+ kvm_mov_to_rr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_RR:
+ kvm_mov_from_rr(vcpu, inst);
+ break;
+ case EVENT_THASH:
+ kvm_thash(vcpu, inst);
+ break;
+ case EVENT_TTAG:
+ kvm_ttag(vcpu, inst);
+ break;
+ case EVENT_TPA:
+ status = kvm_tpa(vcpu, inst);
+ break;
+ case EVENT_TAK:
+ kvm_tak(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_AR_IMM:
+ kvm_mov_to_ar_imm(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_AR:
+ kvm_mov_to_ar_reg(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_AR:
+ kvm_mov_from_ar_reg(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_DBR:
+ kvm_mov_to_dbr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_IBR:
+ kvm_mov_to_ibr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PMC:
+ kvm_mov_to_pmc(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PMD:
+ kvm_mov_to_pmd(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PKR:
+ kvm_mov_to_pkr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_DBR:
+ kvm_mov_from_dbr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_IBR:
+ kvm_mov_from_ibr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PMC:
+ kvm_mov_from_pmc(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PKR:
+ kvm_mov_from_pkr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_CPUID:
+ kvm_mov_from_cpuid(vcpu, inst);
+ break;
+ case EVENT_VMSW:
+ status = IA64_FAULT;
+ break;
+ default:
+ break;
+	}
+	/* Assume all other statuses are IA64_NO_FAULT? */
+ if (status == IA64_NO_FAULT && cause != EVENT_RFI)
+ vcpu_increment_iip(vcpu);
+
+ recover_if_physical_mode(vcpu);
+}
+
+void init_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ vcpu->arch.mode_flags = GUEST_IN_PHY;
+ VMX(vcpu, vrr[0]) = 0x38;
+ VMX(vcpu, vrr[1]) = 0x38;
+ VMX(vcpu, vrr[2]) = 0x38;
+ VMX(vcpu, vrr[3]) = 0x38;
+ VMX(vcpu, vrr[4]) = 0x38;
+ VMX(vcpu, vrr[5]) = 0x38;
+ VMX(vcpu, vrr[6]) = 0x38;
+ VMX(vcpu, vrr[7]) = 0x38;
+ VCPU(vcpu, vpsr) = IA64_PSR_BN;
+ VCPU(vcpu, dcr) = 0;
+	/* pta.size must not be 0. The minimum is 15 (32k); the size
+	 * field occupies bits 7:2, hence the shift below. */
+ VCPU(vcpu, pta) = 15 << 2;
+ VCPU(vcpu, itv) = 0x10000;
+ VCPU(vcpu, itm) = 0;
+ VMX(vcpu, last_itc) = 0;
+
+ VCPU(vcpu, lid) = VCPU_LID(vcpu);
+ VCPU(vcpu, ivr) = 0;
+ VCPU(vcpu, tpr) = 0x10000;
+ VCPU(vcpu, eoi) = 0;
+ VCPU(vcpu, irr[0]) = 0;
+ VCPU(vcpu, irr[1]) = 0;
+ VCPU(vcpu, irr[2]) = 0;
+ VCPU(vcpu, irr[3]) = 0;
+ VCPU(vcpu, pmv) = 0x10000;
+ VCPU(vcpu, cmcv) = 0x10000;
+ VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
+ VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
+ update_vhpi(vcpu, NULL_VECTOR);
+ VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
+
+ for (i = 0; i < 4; i++)
+ VLSAPIC_INSVC(vcpu, i) = 0;
+}
+
+void kvm_init_all_rr(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+ local_irq_save(psr);
+
+	/* WARNING: do not allow virtual mode and physical mode to
+	 * co-exist in the same region
+	 */
+
+ vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
+ vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
+
+ if (is_physical_mode(vcpu)) {
+ if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
+ panic_vm(vcpu);
+
+ ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
+ ia64_dv_serialize_data();
+ } else {
+ ia64_set_rr((VRN0 << VRN_SHIFT),
+ vcpu->arch.metaphysical_saved_rr0);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT),
+ vcpu->arch.metaphysical_saved_rr4);
+ ia64_dv_serialize_data();
+ }
+ ia64_set_rr((VRN1 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN1])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN2 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN2])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN3 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN3])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN5 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN5])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN7 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN7])));
+ ia64_dv_serialize_data();
+ ia64_srlz_d();
+ ia64_set_psr(psr);
+}
+
+int vmm_entry(void)
+{
+ struct kvm_vcpu *v;
+ v = current_vcpu;
+
+ ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ kvm_init_vtlb(v);
+ kvm_init_vhpt(v);
+ init_vcpu(v);
+ kvm_init_all_rr(v);
+ vmm_reset_entry();
+
+ return 0;
+}
+
+void panic_vm(struct kvm_vcpu *v)
+{
+ struct exit_ctl_data *p = &v->arch.exit_data;
+
+ p->exit_reason = EXIT_REASON_VM_PANIC;
+ vmm_transition(v);
+	/* Never returns */
+ while (1);
+}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
new file mode 100644
index 000000000000..b0fcfb62c49e
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.h
@@ -0,0 +1,740 @@
+/*
+ * vcpu.h: vcpu routines
+ * Copyright (c) 2005, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __KVM_VCPU_H__
+#define __KVM_VCPU_H__
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#include "vti.h"
+
+#include <linux/kvm_host.h>
+#include <linux/spinlock.h>
+
+typedef unsigned long IA64_INST;
+
+typedef union U_IA64_BUNDLE {
+ unsigned long i64[2];
+ struct { unsigned long template:5, slot0:41, slot1a:18,
+ slot1b:23, slot2:41; };
+ /* NOTE: following doesn't work because bitfields can't cross natural
+ size boundaries
+ struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
+} IA64_BUNDLE;
+
+typedef union U_INST64_A5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
+ imm9d:9, s:1, major:4; };
+} INST64_A5;
+
+typedef union U_INST64_B4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
+ wh:2, d:1, un1:1, major:4; };
+} INST64_B4;
+
+typedef union U_INST64_B8 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
+} INST64_B8;
+
+typedef union U_INST64_B9 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
+} INST64_B9;
+
+typedef union U_INST64_I19 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
+} INST64_I19;
+
+typedef union U_INST64_I26 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I26;
+
+typedef union U_INST64_I27 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
+} INST64_I27;
+
+typedef union U_INST64_I28 { /* not privileged (mov from AR) */
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I28;
+
+typedef union U_INST64_M28 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M28;
+
+typedef union U_INST64_M29 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M29;
+
+typedef union U_INST64_M30 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
+ x3:3, s:1, major:4; };
+} INST64_M30;
+
+typedef union U_INST64_M31 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M31;
+
+typedef union U_INST64_M32 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M32;
+
+typedef union U_INST64_M33 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M33;
+
+typedef union U_INST64_M35 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+
+} INST64_M35;
+
+typedef union U_INST64_M36 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
+} INST64_M36;
+
+typedef union U_INST64_M37 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
+ i:1, major:4; };
+} INST64_M37;
+
+typedef union U_INST64_M41 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+} INST64_M41;
+
+typedef union U_INST64_M42 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M42;
+
+typedef union U_INST64_M43 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M43;
+
+typedef union U_INST64_M44 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
+} INST64_M44;
+
+typedef union U_INST64_M45 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M45;
+
+typedef union U_INST64_M46 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
+ x3:3, un1:1, major:4; };
+} INST64_M46;
+
+typedef union U_INST64_M47 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
+} INST64_M47;
+
+typedef union U_INST64_M1 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M1;
+
+typedef union U_INST64_M2 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M2;
+
+typedef union U_INST64_M3 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M3;
+
+typedef union U_INST64_M4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M4;
+
+typedef union U_INST64_M5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M5;
+
+typedef union U_INST64_M6 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M6;
+
+typedef union U_INST64_M9 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M9;
+
+typedef union U_INST64_M10 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M10;
+
+typedef union U_INST64_M12 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M12;
+
+typedef union U_INST64_M15 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M15;
+
+typedef union U_INST64 {
+ IA64_INST inst;
+ struct { unsigned long :37, major:4; } generic;
+ INST64_A5 A5; /* used in build_hypercall_bundle only */
+ INST64_B4 B4; /* used in build_hypercall_bundle only */
+ INST64_B8 B8; /* rfi, bsw.[01] */
+ INST64_B9 B9; /* break.b */
+ INST64_I19 I19; /* used in build_hypercall_bundle only */
+ INST64_I26 I26; /* mov register to ar (I unit) */
+ INST64_I27 I27; /* mov immediate to ar (I unit) */
+ INST64_I28 I28; /* mov from ar (I unit) */
+ INST64_M1 M1; /* ld integer */
+ INST64_M2 M2;
+ INST64_M3 M3;
+ INST64_M4 M4; /* st integer */
+ INST64_M5 M5;
+	INST64_M6 M6; /* ldfd floating point */
+	INST64_M9 M9; /* stfd floating point */
+	INST64_M10 M10; /* stfd floating point */
+	INST64_M12 M12; /* ldfd pair floating point */
+ INST64_M15 M15; /* lfetch + imm update */
+ INST64_M28 M28; /* purge translation cache entry */
+ INST64_M29 M29; /* mov register to ar (M unit) */
+ INST64_M30 M30; /* mov immediate to ar (M unit) */
+ INST64_M31 M31; /* mov from ar (M unit) */
+ INST64_M32 M32; /* mov reg to cr */
+ INST64_M33 M33; /* mov from cr */
+ INST64_M35 M35; /* mov to psr */
+ INST64_M36 M36; /* mov from psr */
+ INST64_M37 M37; /* break.m */
+ INST64_M41 M41; /* translation cache insert */
+ INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
+ INST64_M43 M43; /* mov from indirect reg */
+ INST64_M44 M44; /* set/reset system mask */
+ INST64_M45 M45; /* translation purge */
+ INST64_M46 M46; /* translation access (tpa,tak) */
+ INST64_M47 M47; /* purge translation entry */
+} INST64;
+
+#define MASK_41 ((unsigned long)0x1ffffffffff)
+
+/* Virtual address memory attributes encoding */
+#define VA_MATTR_WB 0x0
+#define VA_MATTR_UC 0x4
+#define VA_MATTR_UCE 0x5
+#define VA_MATTR_WC 0x6
+#define VA_MATTR_NATPAGE 0x7
+
+#define PMASK(size) (~((size) - 1))
+#define PSIZE(size) (1UL<<(size))
+#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
+#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
+#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
+#define _PAGE_MA_ST (0x1 << 2) /* reserved for software use */
+
+#define ARCH_PAGE_SHIFT 12
+
+#define INVALID_TI_TAG (1UL << 63)
+
+#define VTLB_PTE_P_BIT 0
+#define VTLB_PTE_IO_BIT 60
+#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
+#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
+
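+/*
+ * One bit per 2^61-byte virtual region (va bits 63:61), used to skip
+ * translation-register lookups for regions with no TR entries.
+ */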
+#define vcpu_quick_region_check(_tr_regions,_ifa) \
+ (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
+
+#define vcpu_quick_region_set(_tr_regions,_ifa) \
+ do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
+
+static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
+ u64 va, u64 rid)
+{
+ trp->page_flags = pte;
+ trp->itir = itir;
+ trp->vadr = va;
+ trp->rid = rid;
+}
+
+extern u64 kvm_lookup_mpa(u64 gpfn);
+extern u64 kvm_gpa_to_mpa(u64 gpa);
+
+/* Return the I/O type if the gpfn maps an I/O page, else 0 */
+#define __gpfn_is_io(gpfn) \
+ ({ \
+ u64 pte, ret = 0; \
+ pte = kvm_lookup_mpa(gpfn); \
+ if (!(pte & GPFN_INV_MASK)) \
+ ret = pte & GPFN_IO_MASK; \
+ ret; \
+ })
+
+#endif
+
+#define IA64_NO_FAULT 0
+#define IA64_FAULT 1
+
+#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
+
+#define SW_BAD 0 /* Bad mode transition */
+#define SW_V2P 1 /* Physical emulation is activated */
+#define SW_P2V 2 /* Exit physical mode emulation */
+#define SW_SELF 3 /* No mode transition */
+#define SW_NOP 4 /* Mode transition, but without action required */
+
+#define GUEST_IN_PHY 0x1
+#define GUEST_PHY_EMUL 0x2
+
+#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
+
+#define VRN_SHIFT 61
+#define VRN_MASK 0xe000000000000000
+#define VRN0 0x0UL
+#define VRN1 0x1UL
+#define VRN2 0x2UL
+#define VRN3 0x3UL
+#define VRN4 0x4UL
+#define VRN5 0x5UL
+#define VRN6 0x6UL
+#define VRN7 0x7UL
+
+#define IRQ_NO_MASKED 0
+#define IRQ_MASKED_BY_VTPR 1
+#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
+
+#define PTA_BASE_SHIFT 15
+
+#define IA64_PSR_VM_BIT 46
+#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
+
+/* Interruption Function State */
+#define IA64_IFS_V_BIT 63
+#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
+#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/gcc_intrin.h>
+
+#define is_physical_mode(v) \
+ ((v->arch.mode_flags) & GUEST_IN_PHY)
+
+#define is_virtual_mode(v) \
+ (!is_physical_mode(v))
+
+#define MODE_IND(psr) \
+ (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+
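+/*
+ * Take the lock with an acquire cmpxchg; on contention, spin reading
+ * the lock word and retry only once it clears, so the cacheline is not
+ * bounced by repeated atomic operations.
+ */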
+#define _vmm_raw_spin_lock(x) \
+ do { \
+ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
+ __u64 ia64_spinlock_val; \
+ ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+ if (unlikely(ia64_spinlock_val)) { \
+ do { \
+ while (*ia64_spinlock_ptr) \
+ ia64_barrier(); \
+ ia64_spinlock_val = \
+ ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+ } while (ia64_spinlock_val); \
+ } \
+ } while (0)
+
+#define _vmm_raw_spin_unlock(x) \
+ do { barrier(); \
+ ((spinlock_t *)x)->raw_lock.lock = 0; } \
+while (0)
+
+void vmm_spin_lock(spinlock_t *lock);
+void vmm_spin_unlock(spinlock_t *lock);
+enum {
+ I_TLB = 1,
+ D_TLB = 2
+};
+
+union kvm_va {
+ struct {
+ unsigned long off : 60; /* intra-region offset */
+ unsigned long reg : 4; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+};
+
+#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
+ _v.f.reg = 0; _v.l; })
+#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
+ _v.f.reg = -1; _v.p; })
+
+#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.rid; })
+#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.ps; })
+#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.ve; })
+
+enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF };
+enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
+
+#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
+#define VMX(_v, _x) ((_v)->arch._x)
+
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
+#define VLSAPIC_XTP(_v) VMX(_v, xtp)
+
+static inline unsigned long itir_ps(unsigned long itir)
+{
+ return ((itir >> 2) & 0x3f);
+}
+
+/**************************************************************************
+ VCPU control register access routines
+ **************************************************************************/
+
+static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, itir));
+}
+
+static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, itir) = val;
+}
+
+static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, ifa));
+}
+
+static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ifa) = val;
+}
+
+static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, iva));
+}
+
+static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, pta));
+}
+
+static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, lid));
+}
+
+static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, tpr));
+}
+
+static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
+{
+ return (0UL); /*reads of eoi always return 0 */
+}
+
+static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[0]));
+}
+
+static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[1]));
+}
+
+static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[2]));
+}
+
+static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[3]));
+}
+
+static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
+{
+ ia64_setreg(_IA64_REG_CR_DCR, val);
+}
+
+static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, isr) = val;
+}
+
+static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, lid) = val;
+}
+
+static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ipsr) = val;
+}
+
+static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iip) = val;
+}
+
+static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ifs) = val;
+}
+
+static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iipa) = val;
+}
+
+static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iha) = val;
+}
+
+static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return vcpu->arch.vrr[reg>>61];
+}
+
+/**************************************************************************
+ VCPU debug breakpoint register access routines
+ **************************************************************************/
+
+static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ __ia64_set_dbr(reg, val);
+}
+
+static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ ia64_set_ibr(reg, val);
+}
+
+static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return ((u64)__ia64_get_dbr(reg));
+}
+
+static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return ((u64)ia64_get_ibr(reg));
+}
+
+/**************************************************************************
+ VCPU performance monitor register access routines
+ **************************************************************************/
+static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ /* NOTE: Writes to unimplemented PMC registers are discarded */
+ ia64_set_pmc(reg, val);
+}
+
+static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ /* NOTE: Writes to unimplemented PMD registers are discarded */
+ ia64_set_pmd(reg, val);
+}
+
+static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
+{
+ /* NOTE: Reads from unimplemented PMC registers return zero */
+ return ((u64)ia64_get_pmc(reg));
+}
+
+static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
+{
+ /* NOTE: Reads from unimplemented PMD registers return zero */
+ return ((u64)ia64_get_pmd(reg));
+}
+
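+/*
+ * Convert a guest region register value into the machine value: embed
+ * the guest rid in the host rid space (rid << 4 | 0xe), clamp the page
+ * size to the host PAGE_SHIFT, and enable the VHPT walker.
+ */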
+static inline unsigned long vrrtomrr(unsigned long val)
+{
+ union ia64_rr rr;
+ rr.val = val;
+ rr.rid = (rr.rid << 4) | 0xe;
+ if (rr.ps > PAGE_SHIFT)
+ rr.ps = PAGE_SHIFT;
+ rr.ve = 1;
+ return rr.val;
+}
+
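+/*
+ * Find the highest set bit in a 256-bit bitmap held as eight 32-bit
+ * words; returns NULL_VECTOR if the bitmap is empty.
+ */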
+static inline int highest_bits(int *dat)
+{
+ u32 bits, bitnum;
+ int i;
+
+ /* loop for all 256 bits */
+ for (i = 7; i >= 0 ; i--) {
+ bits = dat[i];
+ if (bits) {
+ bitnum = fls(bits);
+ return i * 32 + bitnum - 1;
+ }
+ }
+ return NULL_VECTOR;
+}
+
+/*
+ * Return true if the pending irq has higher priority than the
+ * in-service one.
+ */
+static inline int is_higher_irq(int pending, int inservice)
+{
+ return ((pending > inservice)
+ || ((pending != NULL_VECTOR)
+ && (inservice == NULL_VECTOR)));
+}
+
+static inline int is_higher_class(int pending, int mic)
+{
+ return ((pending >> 4) > mic);
+}
+
+/*
+ * Return 0-255 for the highest pending irq,
+ * or NULL_VECTOR when none is pending.
+ */
+static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+ if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
+ return NMI_VECTOR;
+ if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return highest_bits((int *)&VCPU(vcpu, irr[0]));
+}
+
+static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
+{
+ if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
+ return NMI_VECTOR;
+ if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return highest_bits((int *)&(VMX(vcpu, insvc[0])));
+}
+
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+ struct ia64_fpreg *val);
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+ struct ia64_fpreg *val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
+extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
+extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
+extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
+ u64 itir, u64 va, int type);
+extern struct thash_data *vhpt_lookup(u64 va);
+extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
+extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
+extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
+extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
+extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+ u64 itir, u64 ifa, int type);
+extern void thash_purge_all(struct kvm_vcpu *v);
+extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
+ u64 va, int is_data);
+extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
+ u64 ps, int is_data);
+
+extern void vcpu_increment_iip(struct kvm_vcpu *v);
+extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
+extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
+extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
+extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
+extern void nested_dtlb(struct kvm_vcpu *vcpu);
+extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
+extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
+
+extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
+extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
+
+extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
+extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
+extern void vmm_transition(struct kvm_vcpu *vcpu);
+extern void vmm_trampoline(union context *from, union context *to);
+extern int vmm_entry(void);
+extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
+
+extern void vmm_reset_entry(void);
+void kvm_init_vtlb(struct kvm_vcpu *v);
+void kvm_init_vhpt(struct kvm_vcpu *v);
+void thash_init(struct thash_cb *hcb, u64 sz);
+
+void panic_vm(struct kvm_vcpu *v);
+
+extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
+ u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+#endif
+#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
new file mode 100644
index 000000000000..2275bf4e681a
--- /dev/null
+++ b/arch/ia64/kvm/vmm.c
@@ -0,0 +1,66 @@
+/*
+ * vmm.c: vmm module interface with kvm module
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+
+#include <linux/module.h>
+#include <asm/fpswa.h>
+
+#include "vcpu.h"
+
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+
+extern char kvm_ia64_ivt;
+extern fpswa_interface_t *vmm_fpswa_interface;
+
+struct kvm_vmm_info vmm_info = {
+ .module = THIS_MODULE,
+ .vmm_entry = vmm_entry,
+ .tramp_entry = vmm_trampoline,
+ .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+};
+
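+/*
+ * Module init: register the VMM entry points (vmm_entry, the context
+ * trampoline and the VMM IVT) with the generic kvm module; the second
+ * argument to kvm_init() is the vcpu allocation size handed to the
+ * generic layer.
+ */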
+static int __init kvm_vmm_init(void)
+{
+
+ vmm_fpswa_interface = fpswa_interface;
+
+ /* Register the vmm data with the kvm side */
+ return kvm_init(&vmm_info, 1024, THIS_MODULE);
+}
+
+static void __exit kvm_vmm_exit(void)
+{
+ kvm_exit();
+}
+
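+/*
+ * Spinlock wrappers exported to the rest of the VMM: the VMM runs in
+ * an isolated address space, so it takes locks through the raw spin
+ * primitives rather than the full kernel spinlock API.
+ */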
+void vmm_spin_lock(spinlock_t *lock)
+{
+ _vmm_raw_spin_lock(lock);
+}
+
+void vmm_spin_unlock(spinlock_t *lock)
+{
+ _vmm_raw_spin_unlock(lock);
+}
+module_init(kvm_vmm_init)
+module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
new file mode 100644
index 000000000000..3ee5f481c06d
--- /dev/null
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -0,0 +1,1424 @@
+/*
+ * arch/ia64/kvm/vmm_ivt.S
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Kenneth Chen <kenneth.w.chen@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ *
+ *
+ * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
+ * for SMP
+ * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
+ * handler now uses virtual PT.
+ *
+ * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Supporting Intel virtualization architecture
+ *
+ */
+
+/*
+ * This file defines the interruption vector table used by the CPU.
+ * It does not include one entry per possible cause of interruption.
+ *
+ * The first 20 entries of the table contain 64 bundles each while the
+ * remaining 48 entries contain only 16 bundles each.
+ *
+ * The 64 bundles are used to allow inlining the whole handler for
+ * critical interruptions like TLB misses.
+ *
+ * For each entry, the comment is as follows:
+ *
+ *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ *	entry offset ----/     /         /      /  /
+ *	entry number ---------/         /      /  /
+ *	size of the entry -------------/      /  /
+ *	vector name -------------------------/  /
+ *	interruptions triggering this vector ----+
+ *
+ * The table is 32KB in size and must be aligned on a 32KB boundary.
+ * (The CPU ignores the 15 lower bits of the address.)
+ *
+ * Table is based upon EAS2.6 (Oct 1999)
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+#include "kvm_minstate.h"
+#include "vti.h"
+
+#if 1
+# define PSR_DEFAULT_BITS psr.ac
+#else
+# define PSR_DEFAULT_BITS 0
+#endif
+
+
+#define KVM_FAULT(n) \
+ kvm_fault_##n:; \
+ mov r19=n;; \
+ br.sptk.many kvm_fault_##n; \
+ ;; \
+
+
+#define KVM_REFLECT(n) \
+ mov r31=pr; \
+ mov r19=n; /* prepare to save predicates */ \
+ mov r29=cr.ipsr; \
+ ;; \
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
+(p7)br.sptk.many kvm_dispatch_reflection; \
+ br.sptk.many kvm_panic; \
+
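+/*
+ * KVM_REFLECT tests cr.ipsr.vm: a fault raised while the guest was
+ * running (psr.vm set) is reflected back into the guest through
+ * kvm_dispatch_reflection; a fault taken with psr.vm clear means the
+ * VMM itself faulted, which is unrecoverable, hence kvm_panic.
+ */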
+
+GLOBAL_ENTRY(kvm_panic)
+ br.sptk.many kvm_panic
+ ;;
+END(kvm_panic)
+
+
+
+
+
+ .section .text.ivt,"ax"
+
+ .align 32768 // align on 32KB boundary
+ .global kvm_ia64_ivt
+kvm_ia64_ivt:
+///////////////////////////////////////////////////////////////
+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+ENTRY(kvm_vhpt_miss)
+ KVM_FAULT(0)
+END(kvm_vhpt_miss)
+
+
+ .org kvm_ia64_ivt+0x400
+////////////////////////////////////////////////////////////////
+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ENTRY(kvm_itlb_miss)
+ mov r31 = pr
+ mov r29=cr.ipsr;
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+ (p6) br.sptk kvm_alt_itlb_miss
+ mov r19 = 1
+ br.sptk kvm_itlb_miss_dispatch
+ KVM_FAULT(1);
+END(kvm_itlb_miss)
+
+ .org kvm_ia64_ivt+0x0800
+//////////////////////////////////////////////////////////////////
+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ENTRY(kvm_dtlb_miss)
+ mov r31 = pr
+ mov r29=cr.ipsr;
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+(p6)br.sptk kvm_alt_dtlb_miss
+ br.sptk kvm_dtlb_miss_dispatch
+END(kvm_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x0c00
+////////////////////////////////////////////////////////////////////
+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ENTRY(kvm_alt_itlb_miss)
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+ movl r17=PAGE_KERNEL
+ mov r24=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ ;;
+ or r19=r17,r19 // insert PTE control bits into r19
+ ;;
+ movl r20=IA64_GRANULE_SHIFT<<2
+ ;;
+ mov cr.itir=r20
+ ;;
+ itc.i r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
+END(kvm_alt_itlb_miss)
+
+ .org kvm_ia64_ivt+0x1000
+/////////////////////////////////////////////////////////////////////
+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ENTRY(kvm_alt_dtlb_miss)
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+ movl r17=PAGE_KERNEL
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ mov r24=cr.ipsr
+ ;;
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ ;;
+ or r19=r19,r17 // insert PTE control bits into r19
+ ;;
+ movl r20=IA64_GRANULE_SHIFT<<2
+ ;;
+ mov cr.itir=r20
+ ;;
+ itc.d r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
+END(kvm_alt_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x1400
+//////////////////////////////////////////////////////////////////////
+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
+ENTRY(kvm_nested_dtlb_miss)
+ KVM_FAULT(5)
+END(kvm_nested_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x1800
+/////////////////////////////////////////////////////////////////////
+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ENTRY(kvm_ikey_miss)
+ KVM_REFLECT(6)
+END(kvm_ikey_miss)
+
+ .org kvm_ia64_ivt+0x1c00
+/////////////////////////////////////////////////////////////////////
+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ENTRY(kvm_dkey_miss)
+ KVM_REFLECT(7)
+END(kvm_dkey_miss)
+
+ .org kvm_ia64_ivt+0x2000
+////////////////////////////////////////////////////////////////////
+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ENTRY(kvm_dirty_bit)
+ KVM_REFLECT(8)
+END(kvm_dirty_bit)
+
+ .org kvm_ia64_ivt+0x2400
+////////////////////////////////////////////////////////////////////
+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ENTRY(kvm_iaccess_bit)
+ KVM_REFLECT(9)
+END(kvm_iaccess_bit)
+
+ .org kvm_ia64_ivt+0x2800
+///////////////////////////////////////////////////////////////////
+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ENTRY(kvm_daccess_bit)
+ KVM_REFLECT(10)
+END(kvm_daccess_bit)
+
+ .org kvm_ia64_ivt+0x2c00
+/////////////////////////////////////////////////////////////////
+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
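+// The break instruction is the guest's trap into the VMM (used, for
+// instance, for PAL/SAL firmware emulation): save the minimal state,
+// re-enable interruption collection, and pass cr.ifa/cr.isr/cr.iim to
+// the C handler kvm_ia64_handle_break.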
+ENTRY(kvm_break_fault)
+ mov r31=pr
+ mov r19=11
+ mov r29=cr.ipsr
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ ;;
+ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+ mov out0=cr.ifa
+ mov out2=cr.isr // FIXME: pity to make this slow access twice
+ mov out3=cr.iim // FIXME: pity to make this slow access twice
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15)ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out1=16,sp
+ br.call.sptk.many b6=kvm_ia64_handle_break
+ ;;
+END(kvm_break_fault)
+
+ .org kvm_ia64_ivt+0x3000
+/////////////////////////////////////////////////////////////////
+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
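+// Host external interrupt. If it arrived while the guest was running
+// (psr.vm set), it is routed through kvm_dispatch_interrupt; an
+// interrupt taken inside the VMM itself falls through to the
+// hand-built pt_regs frame below and returns via ia64_leave_nested.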
+ENTRY(kvm_interrupt)
+ mov r31=pr // prepare to save predicates
+ mov r19=12
+ mov r29=cr.ipsr
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+ tbit.z p0,p15=r29,IA64_PSR_I_BIT
+ ;;
+(p7) br.sptk kvm_dispatch_interrupt
+ ;;
+ mov r27=ar.rsc /* M */
+ mov r20=r1 /* A */
+ mov r25=ar.unat /* M */
+ mov r26=ar.pfs /* I */
+ mov r28=cr.iip /* M */
+ cover /* B (or nothing) */
+ ;;
+ mov r1=sp
+ ;;
+ invala /* M */
+ mov r30=cr.ifs
+ ;;
+ addl r1=-VMM_PT_REGS_SIZE,r1
+ ;;
+ adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
+ adds r16=PT(CR_IPSR),r1
+ ;;
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
+ st8 [r16]=r29 /* save cr.ipsr */
+ ;;
+ lfetch.fault.excl.nt1 [r17]
+ mov r29=b0
+ ;;
+ adds r16=PT(R8),r1 /* initialize first base pointer */
+ adds r17=PT(R9),r1 /* initialize second base pointer */
+ mov r18=r0 /* make sure r18 isn't NaT */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r8,16
+.mem.offset 8,0; st8.spill [r17]=r9,16
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r10,24
+.mem.offset 8,0; st8.spill [r17]=r11,24
+ ;;
+ st8 [r16]=r28,16 /* save cr.iip */
+ st8 [r17]=r30,16 /* save cr.ifs */
+ mov r8=ar.fpsr /* M */
+ mov r9=ar.csd
+ mov r10=ar.ssd
+ movl r11=FPSR_DEFAULT /* L-unit */
+ ;;
+ st8 [r16]=r25,16 /* save ar.unat */
+ st8 [r17]=r26,16 /* save ar.pfs */
+ shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
+ ;;
+ st8 [r16]=r27,16 /* save ar.rsc */
+ adds r17=16,r17 /* skip over ar_rnat field */
+ ;;
+ st8 [r17]=r31,16 /* save predicates */
+ adds r16=16,r16 /* skip over ar_bspstore field */
+ ;;
+ st8 [r16]=r29,16 /* save b0 */
+ st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
+.mem.offset 8,0; st8.spill [r17]=r12,16
+ adds r12=-16,r1
+ /* switch to kernel memory stack (with 16 bytes of scratch) */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r13,16
+.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r15,16
+.mem.offset 8,0; st8.spill [r17]=r14,16
+ dep r14=-1,r0,60,4
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r2,16
+.mem.offset 8,0; st8.spill [r17]=r3,16
+ adds r2=VMM_PT_REGS_R16_OFFSET,r1
+ adds r14 = VMM_VCPU_GP_OFFSET,r13
+ ;;
+ mov r8=ar.ccv
+ ld8 r14 = [r14]
+ ;;
+ mov r1=r14 /* establish kernel global pointer */
+ ;;
+ bsw.1
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+ mov out0=r13
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i
+ ;;
+ //(p15) ssm psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ srlz.i // ensure everybody knows psr.ic is back on
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r16,16
+.mem.offset 8,0; st8.spill [r3]=r17,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r18,16
+.mem.offset 8,0; st8.spill [r3]=r19,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r20,16
+.mem.offset 8,0; st8.spill [r3]=r21,16
+ mov r18=b6
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r22,16
+.mem.offset 8,0; st8.spill [r3]=r23,16
+ mov r19=b7
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r24,16
+.mem.offset 8,0; st8.spill [r3]=r25,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r26,16
+.mem.offset 8,0; st8.spill [r3]=r27,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r28,16
+.mem.offset 8,0; st8.spill [r3]=r29,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r30,16
+.mem.offset 8,0; st8.spill [r3]=r31,32
+ ;;
+ mov ar.fpsr=r11 /* M-unit */
+ st8 [r2]=r8,8 /* ar.ccv */
+ adds r24=PT(B6)-PT(F7),r3
+ ;;
+ stf.spill [r2]=f6,32
+ stf.spill [r3]=f7,32
+ ;;
+ stf.spill [r2]=f8,32
+ stf.spill [r3]=f9,32
+ ;;
+ stf.spill [r2]=f10
+ stf.spill [r3]=f11
+ adds r25=PT(B7)-PT(F11),r3
+ ;;
+ st8 [r24]=r18,16 /* b6 */
+ st8 [r25]=r19,16 /* b7 */
+ ;;
+ st8 [r24]=r9 /* ar.csd */
+ st8 [r25]=r10 /* ar.ssd */
+ ;;
+ srlz.d // make sure we see the effect of cr.ivr
+ addl r14=@gprel(ia64_leave_nested),gp
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=kvm_ia64_handle_irq
+ ;;
+END(kvm_interrupt)
+
+ .global kvm_dispatch_vexirq
+ .org kvm_ia64_ivt+0x3400
+//////////////////////////////////////////////////////////////////////
+// 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(kvm_virtual_exirq)
+ mov r31=pr
+ mov r19=13
+ mov r30 =r0
+ ;;
+kvm_dispatch_vexirq:
+ cmp.eq p6,p0 = 1,r30
+ ;;
+(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+(p6)ld8 r1 = [r29]
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,1,0
+ mov out0=r13
+
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ KVM_SAVE_REST
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=kvm_vexirq
+END(kvm_virtual_exirq)
+
+ .org kvm_ia64_ivt+0x3800
+/////////////////////////////////////////////////////////////////////
+// 0x3800 Entry 14 (size 64 bundles) Reserved
+ KVM_FAULT(14)
+ // this code segment is from 2.6.16.13
+
+
+ .org kvm_ia64_ivt+0x3c00
+///////////////////////////////////////////////////////////////////////
+// 0x3c00 Entry 15 (size 64 bundles) Reserved
+ KVM_FAULT(15)
+
+
+ .org kvm_ia64_ivt+0x4000
+///////////////////////////////////////////////////////////////////////
+// 0x4000 Entry 16 (size 64 bundles) Reserved
+ KVM_FAULT(16)
+
+ .org kvm_ia64_ivt+0x4400
+//////////////////////////////////////////////////////////////////////
+// 0x4400 Entry 17 (size 64 bundles) Reserved
+ KVM_FAULT(17)
+
+ .org kvm_ia64_ivt+0x4800
+//////////////////////////////////////////////////////////////////////
+// 0x4800 Entry 18 (size 64 bundles) Reserved
+ KVM_FAULT(18)
+
+ .org kvm_ia64_ivt+0x4c00
+//////////////////////////////////////////////////////////////////////
+// 0x4c00 Entry 19 (size 64 bundles) Reserved
+ KVM_FAULT(19)
+
+ .org kvm_ia64_ivt+0x5000
+//////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(kvm_page_not_present)
+ KVM_REFLECT(20)
+END(kvm_page_not_present)
+
+ .org kvm_ia64_ivt+0x5100
+///////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(kvm_key_permission)
+ KVM_REFLECT(21)
+END(kvm_key_permission)
+
+ .org kvm_ia64_ivt+0x5200
+//////////////////////////////////////////////////////////////////////
+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ENTRY(kvm_iaccess_rights)
+ KVM_REFLECT(22)
+END(kvm_iaccess_rights)
+
+ .org kvm_ia64_ivt+0x5300
+//////////////////////////////////////////////////////////////////////
+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ENTRY(kvm_daccess_rights)
+ KVM_REFLECT(23)
+END(kvm_daccess_rights)
+
+ .org kvm_ia64_ivt+0x5400
+/////////////////////////////////////////////////////////////////////
+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
+ENTRY(kvm_general_exception)
+ KVM_REFLECT(24)
+ KVM_FAULT(24)
+END(kvm_general_exception)
+
+ .org kvm_ia64_ivt+0x5500
+//////////////////////////////////////////////////////////////////////
+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ENTRY(kvm_disabled_fp_reg)
+ KVM_REFLECT(25)
+END(kvm_disabled_fp_reg)
+
+ .org kvm_ia64_ivt+0x5600
+////////////////////////////////////////////////////////////////////
+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ENTRY(kvm_nat_consumption)
+ KVM_REFLECT(26)
+END(kvm_nat_consumption)
+
+ .org kvm_ia64_ivt+0x5700
+/////////////////////////////////////////////////////////////////////
+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ENTRY(kvm_speculation_vector)
+ KVM_REFLECT(27)
+END(kvm_speculation_vector)
+
+ .org kvm_ia64_ivt+0x5800
+/////////////////////////////////////////////////////////////////////
+// 0x5800 Entry 28 (size 16 bundles) Reserved
+ KVM_FAULT(28)
+
+ .org kvm_ia64_ivt+0x5900
+///////////////////////////////////////////////////////////////////
+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ENTRY(kvm_debug_vector)
+ KVM_FAULT(29)
+END(kvm_debug_vector)
+
+ .org kvm_ia64_ivt+0x5a00
+///////////////////////////////////////////////////////////////
+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ENTRY(kvm_unaligned_access)
+ KVM_REFLECT(30)
+END(kvm_unaligned_access)
+
+ .org kvm_ia64_ivt+0x5b00
+//////////////////////////////////////////////////////////////////////
+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ENTRY(kvm_unsupported_data_reference)
+ KVM_REFLECT(31)
+END(kvm_unsupported_data_reference)
+
+ .org kvm_ia64_ivt+0x5c00
+////////////////////////////////////////////////////////////////////
+// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
+ENTRY(kvm_floating_point_fault)
+ KVM_REFLECT(32)
+END(kvm_floating_point_fault)
+
+ .org kvm_ia64_ivt+0x5d00
+/////////////////////////////////////////////////////////////////////
+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ENTRY(kvm_floating_point_trap)
+ KVM_REFLECT(33)
+END(kvm_floating_point_trap)
+
+ .org kvm_ia64_ivt+0x5e00
+//////////////////////////////////////////////////////////////////////
+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ENTRY(kvm_lower_privilege_trap)
+ KVM_REFLECT(34)
+END(kvm_lower_privilege_trap)
+
+ .org kvm_ia64_ivt+0x5f00
+//////////////////////////////////////////////////////////////////////
+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ENTRY(kvm_taken_branch_trap)
+ KVM_REFLECT(35)
+END(kvm_taken_branch_trap)
+
+ .org kvm_ia64_ivt+0x6000
+////////////////////////////////////////////////////////////////////
+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ENTRY(kvm_single_step_trap)
+ KVM_REFLECT(36)
+END(kvm_single_step_trap)
+ .global kvm_virtualization_fault_back
+ .org kvm_ia64_ivt+0x6100
+/////////////////////////////////////////////////////////////////////
+// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
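+// Fast path: the most frequent virtualized instructions (mov from ar,
+// mov from/to rr, rsm/ssm, mov to psr, thash) are decoded from the
+// cause code in r24 and handled by hand-written assembly stubs; all
+// other causes fall back to the C emulator through
+// kvm_dispatch_virtualization_fault.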
+ENTRY(kvm_virtualization_fault)
+ mov r31=pr
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ st8 [r16] = r1
+ adds r17 = VMM_VCPU_GP_OFFSET, r21
+ ;;
+ ld8 r1 = [r17]
+ cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
+ cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
+ cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
+ cmp.eq p9,p0=EVENT_RSM,r24
+ cmp.eq p10,p0=EVENT_SSM,r24
+ cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+ cmp.eq p12,p0=EVENT_THASH,r24
+ (p6) br.dptk.many kvm_asm_mov_from_ar
+ (p7) br.dptk.many kvm_asm_mov_from_rr
+ (p8) br.dptk.many kvm_asm_mov_to_rr
+ (p9) br.dptk.many kvm_asm_rsm
+ (p10) br.dptk.many kvm_asm_ssm
+ (p11) br.dptk.many kvm_asm_mov_to_psr
+ (p12) br.dptk.many kvm_asm_thash
+ ;;
+kvm_virtualization_fault_back:
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ ld8 r1 = [r16]
+ ;;
+ mov r19=37
+ adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+ adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
+ cmp.ne p6,p0=EVENT_RFI, r24
+ (p6) br.sptk kvm_dispatch_virtualization_fault
+ ;;
+ adds r18=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18]
+ ;;
+ adds r18=VMM_VPD_VIFS_OFFSET,r18
+ ;;
+ ld8 r18=[r18]
+ ;;
+ tbit.z p6,p0=r18,63
+ (p6) br.sptk kvm_dispatch_virtualization_fault
+ ;;
+ // if vifs.v == 1, discard the current register frame
+ alloc r18=ar.pfs,0,0,0,0
+ br.sptk kvm_dispatch_virtualization_fault
+END(kvm_virtualization_fault)
+
+ .org kvm_ia64_ivt+0x6200
+//////////////////////////////////////////////////////////////
+// 0x6200 Entry 38 (size 16 bundles) Reserved
+ KVM_FAULT(38)
+
+ .org kvm_ia64_ivt+0x6300
+/////////////////////////////////////////////////////////////////
+// 0x6300 Entry 39 (size 16 bundles) Reserved
+ KVM_FAULT(39)
+
+ .org kvm_ia64_ivt+0x6400
+/////////////////////////////////////////////////////////////////
+// 0x6400 Entry 40 (size 16 bundles) Reserved
+ KVM_FAULT(40)
+
+ .org kvm_ia64_ivt+0x6500
+//////////////////////////////////////////////////////////////////
+// 0x6500 Entry 41 (size 16 bundles) Reserved
+ KVM_FAULT(41)
+
+ .org kvm_ia64_ivt+0x6600
+//////////////////////////////////////////////////////////////////
+// 0x6600 Entry 42 (size 16 bundles) Reserved
+ KVM_FAULT(42)
+
+ .org kvm_ia64_ivt+0x6700
+//////////////////////////////////////////////////////////////////
+// 0x6700 Entry 43 (size 16 bundles) Reserved
+ KVM_FAULT(43)
+
+ .org kvm_ia64_ivt+0x6800
+//////////////////////////////////////////////////////////////////
+// 0x6800 Entry 44 (size 16 bundles) Reserved
+ KVM_FAULT(44)
+
+ .org kvm_ia64_ivt+0x6900
+///////////////////////////////////////////////////////////////////
+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
+//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ENTRY(kvm_ia32_exception)
+ KVM_FAULT(45)
+END(kvm_ia32_exception)
+
+ .org kvm_ia64_ivt+0x6a00
+////////////////////////////////////////////////////////////////////
+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+ENTRY(kvm_ia32_intercept)
+ KVM_FAULT(46)
+END(kvm_ia32_intercept)
+
+ .org kvm_ia64_ivt+0x6c00
+/////////////////////////////////////////////////////////////////////
+// 0x6c00 Entry 48 (size 16 bundles) Reserved
+ KVM_FAULT(48)
+
+ .org kvm_ia64_ivt+0x6d00
+//////////////////////////////////////////////////////////////////////
+// 0x6d00 Entry 49 (size 16 bundles) Reserved
+ KVM_FAULT(49)
+
+ .org kvm_ia64_ivt+0x6e00
+//////////////////////////////////////////////////////////////////////
+// 0x6e00 Entry 50 (size 16 bundles) Reserved
+ KVM_FAULT(50)
+
+ .org kvm_ia64_ivt+0x6f00
+/////////////////////////////////////////////////////////////////////
+// 0x6f00 Entry 51 (size 16 bundles) Reserved
+ KVM_FAULT(51)
+
+ .org kvm_ia64_ivt+0x7000
+/////////////////////////////////////////////////////////////////////
+// 0x7000 Entry 52 (size 16 bundles) Reserved
+ KVM_FAULT(52)
+
+ .org kvm_ia64_ivt+0x7100
+////////////////////////////////////////////////////////////////////
+// 0x7100 Entry 53 (size 16 bundles) Reserved
+ KVM_FAULT(53)
+
+ .org kvm_ia64_ivt+0x7200
+/////////////////////////////////////////////////////////////////////
+// 0x7200 Entry 54 (size 16 bundles) Reserved
+ KVM_FAULT(54)
+
+ .org kvm_ia64_ivt+0x7300
+////////////////////////////////////////////////////////////////////
+// 0x7300 Entry 55 (size 16 bundles) Reserved
+ KVM_FAULT(55)
+
+ .org kvm_ia64_ivt+0x7400
+////////////////////////////////////////////////////////////////////
+// 0x7400 Entry 56 (size 16 bundles) Reserved
+ KVM_FAULT(56)
+
+ .org kvm_ia64_ivt+0x7500
+/////////////////////////////////////////////////////////////////////
+// 0x7500 Entry 57 (size 16 bundles) Reserved
+ KVM_FAULT(57)
+
+ .org kvm_ia64_ivt+0x7600
+/////////////////////////////////////////////////////////////////////
+// 0x7600 Entry 58 (size 16 bundles) Reserved
+ KVM_FAULT(58)
+
+ .org kvm_ia64_ivt+0x7700
+////////////////////////////////////////////////////////////////////
+// 0x7700 Entry 59 (size 16 bundles) Reserved
+ KVM_FAULT(59)
+
+ .org kvm_ia64_ivt+0x7800
+////////////////////////////////////////////////////////////////////
+// 0x7800 Entry 60 (size 16 bundles) Reserved
+ KVM_FAULT(60)
+
+ .org kvm_ia64_ivt+0x7900
+/////////////////////////////////////////////////////////////////////
+// 0x7900 Entry 61 (size 16 bundles) Reserved
+ KVM_FAULT(61)
+
+ .org kvm_ia64_ivt+0x7a00
+/////////////////////////////////////////////////////////////////////
+// 0x7a00 Entry 62 (size 16 bundles) Reserved
+ KVM_FAULT(62)
+
+ .org kvm_ia64_ivt+0x7b00
+/////////////////////////////////////////////////////////////////////
+// 0x7b00 Entry 63 (size 16 bundles) Reserved
+ KVM_FAULT(63)
+
+ .org kvm_ia64_ivt+0x7c00
+////////////////////////////////////////////////////////////////////
+// 0x7c00 Entry 64 (size 16 bundles) Reserved
+ KVM_FAULT(64)
+
+ .org kvm_ia64_ivt+0x7d00
+/////////////////////////////////////////////////////////////////////
+// 0x7d00 Entry 65 (size 16 bundles) Reserved
+ KVM_FAULT(65)
+
+ .org kvm_ia64_ivt+0x7e00
+/////////////////////////////////////////////////////////////////////
+// 0x7e00 Entry 66 (size 16 bundles) Reserved
+ KVM_FAULT(66)
+
+ .org kvm_ia64_ivt+0x7f00
+////////////////////////////////////////////////////////////////////
+// 0x7f00 Entry 67 (size 16 bundles) Reserved
+ KVM_FAULT(67)
+
+ .org kvm_ia64_ivt+0x8000
+// There is no particular reason for this code to be here, other than that
+// there happens to be space here that would go unused otherwise. If this
+// fault ever gets "unreserved", simply move the following code to a more
+// suitable spot...
+
+
+ENTRY(kvm_dtlb_miss_dispatch)
+ mov r19 = 2
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+ ;;
+ KVM_SAVE_REST
+ KVM_SAVE_EXTRA
+ mov rp=r14
+ ;;
+ adds out2=16,r12
+ br.call.sptk.many b6=kvm_page_fault
+END(kvm_dtlb_miss_dispatch)
+
+ENTRY(kvm_itlb_miss_dispatch)
+
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out2=16,r12
+ br.call.sptk.many b6=kvm_page_fault
+END(kvm_itlb_miss_dispatch)
+
+ENTRY(kvm_dispatch_reflection)
+ /*
+ * Input:
+ * psr.ic: off
+ * r19: intr type (offset into ivt, see ia64_int.h)
+ * r31: contains saved predicates (pr)
+ */
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,5,0
+ mov out0=cr.ifa
+ mov out1=cr.isr
+ mov out2=cr.iim
+ mov out3=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out4=16,r12
+ br.call.sptk.many b6=reflect_interruption
+END(kvm_dispatch_reflection)
+
+ENTRY(kvm_dispatch_virtualization_fault)
+ adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+ adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ ;;
+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+ mov out0=r13 //vcpu
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+ ;;
+ KVM_SAVE_REST
+ KVM_SAVE_EXTRA
+ mov rp=r14
+ ;;
+ adds out1=16,sp //regs
+ br.call.sptk.many b6=kvm_emulate
+END(kvm_dispatch_virtualization_fault)
+
+
+ENTRY(kvm_dispatch_interrupt)
+ KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+ //mov out0=cr.ivr // pass cr.ivr as first arg
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i
+ ;;
+ //(p15) ssm psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ mov out0=r13 // pass pointer to pt_regs as second arg
+ br.call.sptk.many b6=kvm_ia64_handle_irq
+END(kvm_dispatch_interrupt)
+
+
+
+
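+/*
+ * Return path for an interruption taken while already inside the VMM:
+ * restore the pt_regs frame built by kvm_interrupt (banked registers
+ * last, with psr.ic/psr.i off) and rfi back to the interrupted code.
+ */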
+GLOBAL_ENTRY(ia64_leave_nested)
+ rsm psr.i
+ ;;
+ adds r21=PT(PR)+16,r12
+ ;;
+ lfetch [r21],PT(CR_IPSR)-PT(PR)
+ adds r2=PT(B6)+16,r12
+ adds r3=PT(R16)+16,r12
+ ;;
+ lfetch [r21]
+ ld8 r28=[r2],8 // load b6
+ adds r29=PT(R24)+16,r12
+
+ ld8.fill r16=[r3]
+ adds r3=PT(AR_CSD)-PT(R16),r3
+ adds r30=PT(AR_CCV)+16,r12
+ ;;
+ ld8.fill r24=[r29]
+ ld8 r15=[r30] // load ar.ccv
+ ;;
+ ld8 r29=[r2],16 // load b7
+ ld8 r30=[r3],16 // load ar.csd
+ ;;
+ ld8 r31=[r2],16 // load ar.ssd
+ ld8.fill r8=[r3],16
+ ;;
+ ld8.fill r9=[r2],16
+ ld8.fill r10=[r3],PT(R17)-PT(R10)
+ ;;
+ ld8.fill r11=[r2],PT(R18)-PT(R11)
+ ld8.fill r17=[r3],16
+ ;;
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
+ ;;
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
+ mov ar.csd=r30
+ mov ar.ssd=r31
+ ;;
+ rsm psr.i | psr.ic
+ // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
+ ;;
+ srlz.i
+ ;;
+ ld8.fill r22=[r2],24
+ ld8.fill r23=[r3],24
+ mov b6=r28
+ ;;
+ ld8.fill r25=[r2],16
+ ld8.fill r26=[r3],16
+ mov b7=r29
+ ;;
+ ld8.fill r27=[r2],16
+ ld8.fill r28=[r3],16
+ ;;
+ ld8.fill r29=[r2],16
+ ld8.fill r30=[r3],24
+ ;;
+ ld8.fill r31=[r2],PT(F9)-PT(R31)
+ adds r3=PT(F10)-PT(F6),r3
+ ;;
+ ldf.fill f9=[r2],PT(F6)-PT(F9)
+ ldf.fill f10=[r3],PT(F8)-PT(F10)
+ ;;
+ ldf.fill f6=[r2],PT(F7)-PT(F6)
+ ;;
+ ldf.fill f7=[r2],PT(F11)-PT(F7)
+ ldf.fill f8=[r3],32
+ ;;
+ srlz.i // ensure interruption collection is off
+ mov ar.ccv=r15
+ ;;
+ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
+ ;;
+ ldf.fill f11=[r2]
+// mov r18=r13
+// mov r21=r13
+ adds r16=PT(CR_IPSR)+16,r12
+ adds r17=PT(CR_IIP)+16,r12
+ ;;
+ ld8 r29=[r16],16 // load cr.ipsr
+ ld8 r28=[r17],16 // load cr.iip
+ ;;
+ ld8 r30=[r16],16 // load cr.ifs
+ ld8 r25=[r17],16 // load ar.unat
+ ;;
+ ld8 r26=[r16],16 // load ar.pfs
+ ld8 r27=[r17],16 // load ar.rsc
+ cmp.eq p9,p0=r0,r0
+ // set p9 to indicate that we should restore cr.ifs
+ ;;
+ ld8 r24=[r16],16 // load ar.rnat (may be garbage)
+ ld8 r23=[r17],16// load ar.bspstore (may be garbage)
+ ;;
+ ld8 r31=[r16],16 // load predicates
+ ld8 r22=[r17],16 // load b0
+ ;;
+ ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 // load r1
+ ;;
+ ld8.fill r12=[r16],16
+ ld8.fill r13=[r17],16
+ ;;
+ ld8 r20=[r16],16 // ar.fpsr
+ ld8.fill r15=[r17],16
+ ;;
+ ld8.fill r14=[r16],16
+ ld8.fill r2=[r17]
+ ;;
+ ld8.fill r3=[r16]
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
+ ;;
+ mov b0=r22
+ mov ar.pfs=r26
+ mov cr.ifs=r30
+ mov cr.ipsr=r29
+ mov ar.fpsr=r20
+ mov cr.iip=r28
+ ;;
+ mov ar.rsc=r27
+ mov ar.unat=r25
+ mov pr=r31,-1
+ rfi
+END(ia64_leave_nested)
+
+
+
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it
+ * returns to user- or fsys-mode, hence we disable interrupts early on.
+ */
+ adds r2 = PT(R4)+16,r12
+ adds r3 = PT(R5)+16,r12
+ adds r8 = PT(EML_UNAT)+16,r12
+ ;;
+ ld8 r8 = [r8]
+ ;;
+ mov ar.unat=r8
+ ;;
+ ld8.fill r4=[r2],16 //load r4
+ ld8.fill r5=[r3],16 //load r5
+ ;;
+ ld8.fill r6=[r2] //load r6
+ ld8.fill r7=[r3] //load r7
+ ;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+ rsm psr.i
+ ;;
+ br.call.sptk.many b0=leave_hypervisor_tail
+ ;;
+ adds r20=PT(PR)+16,r12
+ adds r8=PT(EML_UNAT)+16,r12
+ ;;
+ ld8 r8=[r8]
+ ;;
+ mov ar.unat=r8
+ ;;
+ lfetch [r20],PT(CR_IPSR)-PT(PR)
+ adds r2 = PT(B6)+16,r12
+ adds r3 = PT(B7)+16,r12
+ ;;
+ lfetch [r20]
+ ;;
+ ld8 r24=[r2],16 /* B6 */
+ ld8 r25=[r3],16 /* B7 */
+ ;;
+ ld8 r26=[r2],16 /* ar_csd */
+ ld8 r27=[r3],16 /* ar_ssd */
+ mov b6 = r24
+ ;;
+ ld8.fill r8=[r2],16
+ ld8.fill r9=[r3],16
+ mov b7 = r25
+ ;;
+ mov ar.csd = r26
+ mov ar.ssd = r27
+ ;;
+ ld8.fill r10=[r2],PT(R15)-PT(R10)
+ ld8.fill r11=[r3],PT(R14)-PT(R11)
+ ;;
+ ld8.fill r15=[r2],PT(R16)-PT(R15)
+ ld8.fill r14=[r3],PT(R17)-PT(R14)
+ ;;
+ ld8.fill r16=[r2],16
+ ld8.fill r17=[r3],16
+ ;;
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
+ ;;
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
+ ;;
+ ld8.fill r22=[r2],16
+ ld8.fill r23=[r3],16
+ ;;
+ ld8.fill r24=[r2],16
+ ld8.fill r25=[r3],16
+ ;;
+ ld8.fill r26=[r2],16
+ ld8.fill r27=[r3],16
+ ;;
+ ld8.fill r28=[r2],16
+ ld8.fill r29=[r3],16
+ ;;
+ ld8.fill r30=[r2],PT(F6)-PT(R30)
+ ld8.fill r31=[r3],PT(F7)-PT(R31)
+ ;;
+ rsm psr.i | psr.ic
+ // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
+ ;;
+ srlz.i // ensure interruption collection is off
+ ;;
+ bsw.0
+ ;;
+ adds r16 = PT(CR_IPSR)+16,r12
+ adds r17 = PT(CR_IIP)+16,r12
+ mov r21=r13 // get current
+ ;;
+ ld8 r31=[r16],16 // load cr.ipsr
+ ld8 r30=[r17],16 // load cr.iip
+ ;;
+ ld8 r29=[r16],16 // load cr.ifs
+ ld8 r28=[r17],16 // load ar.unat
+ ;;
+ ld8 r27=[r16],16 // load ar.pfs
+ ld8 r26=[r17],16 // load ar.rsc
+ ;;
+ ld8 r25=[r16],16 // load ar.rnat
+ ld8 r24=[r17],16 // load ar.bspstore
+ ;;
+ ld8 r23=[r16],16 // load predicates
+ ld8 r22=[r17],16 // load b0
+ ;;
+ ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 //load r1
+ ;;
+ ld8.fill r12=[r16],16 //load r12
+ ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
+ ;;
+ ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
+ ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
+ ;;
+ ld8.fill r3=[r16] //load r3
+ ld8 r18=[r17] //load ar_ccv
+ ;;
+ mov ar.fpsr=r19
+ mov ar.ccv=r18
+ shr.u r18=r20,16
+ ;;
+kvm_rbs_switch:
+ mov r19=96
+
+kvm_dont_preserve_current_frame:
+/*
+ * To prevent leaking bits between the hypervisor and guest domain,
+ * we must clear the stacked registers in the "invalid" partition here.
+ * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
+ * 5 registers/cycle on McKinley).
+ */
+# define pRecurse p6
+# define pReturn p7
+# define Nregs 14
+
+ alloc loc0=ar.pfs,2,Nregs-2,2,0
+ shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
+ sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
+ ;;
+ mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
+ shladd in0=loc1,3,r19
+ mov in1=0
+ ;;
+ TEXT_ALIGN(32)
+kvm_rse_clear_invalid:
+ alloc loc0=ar.pfs,2,Nregs-2,2,0
+ cmp.lt pRecurse,p0=Nregs*8,in0
+ // if more than Nregs regs left to clear, (re)curse
+ add out0=-Nregs*8,in0
+ add out1=1,in1 // increment recursion count
+ mov loc1=0
+ mov loc2=0
+ ;;
+ mov loc3=0
+ mov loc4=0
+ mov loc5=0
+ mov loc6=0
+ mov loc7=0
+(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
+ ;;
+ mov loc8=0
+ mov loc9=0
+ cmp.ne pReturn,p0=r0,in1
+ // if recursion count != 0, we need to do a br.ret
+ mov loc10=0
+ mov loc11=0
+(pReturn) br.ret.dptk.many b0
+
+# undef pRecurse
+# undef pReturn
+
+// loadrs has already been shifted
+ alloc r16=ar.pfs,0,0,0,0 // drop current register frame
+ ;;
+ loadrs
+ ;;
+ mov ar.bspstore=r24
+ ;;
+ mov ar.unat=r28
+ mov ar.rnat=r25
+ mov ar.rsc=r26
+ ;;
+ mov cr.ipsr=r31
+ mov cr.iip=r30
+ mov cr.ifs=r29
+ mov ar.pfs=r27
+ adds r18=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18] //vpd
+ adds r17=VMM_VCPU_ISR_OFFSET,r21
+ ;;
+ ld8 r17=[r17]
+ adds r19=VMM_VPD_VPSR_OFFSET,r18
+ ;;
+ ld8 r19=[r19] //vpsr
+ adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
+ ;;
+ ld8 r20=[r20]
+ ;;
+//vsa_sync_write_start
+ mov r25=r18
+ adds r16= VMM_VCPU_GP_OFFSET,r21
+ ;;
+ ld8 r16= [r16] // load gp (used to compute ia64_vmm_entry in r24)
+ movl r24=@gprel(ia64_vmm_entry) // calculate return address
+ ;;
+ add r24=r24,r16
+ ;;
+ add r16=PAL_VPS_SYNC_WRITE,r20
+ ;;
+ mov b0=r16
+ br.cond.sptk b0 // call the service
+ ;;
+END(ia64_leave_hypervisor)
+// fall through
+GLOBAL_ENTRY(ia64_vmm_entry)
+/*
+ * must be at bank 0
+ * parameter:
+ * r17:cr.isr
+ * r18:vpd
+ * r19:vpsr
+ * r20:__vsa_base
+ * r22:b0
+ * r23:predicate
+ */
+ mov r24=r22
+ mov r25=r18
+ tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
+ ;;
+ (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+ (p1) br.sptk.many ia64_vmm_entry_out
+ ;;
+ tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir
+ ;;
+ (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+ (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
+ (p2) ld8 r26=[r25]
+ ;;
+ia64_vmm_entry_out:
+ mov pr=r23,-2
+ mov b0=r29
+ ;;
+ br.cond.sptk b0 // call pal service
+END(ia64_vmm_entry)
+
+
+
+/*
+ * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
+ * u64 arg3, u64 arg4, u64 arg5,
+ * u64 arg6, u64 arg7);
+ *
+ * XXX: The currently defined services use only 4 args at the max. The
+ * rest are not consumed.
+ */
+GLOBAL_ENTRY(ia64_call_vsa)
+ .regstk 4,4,0,0
+
+rpsave = loc0
+pfssave = loc1
+psrsave = loc2
+entry = loc3
+hostret = r24
+
+ alloc pfssave=ar.pfs,4,4,0,0
+ mov rpsave=rp
+ adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
+ ;;
+ ld8 entry=[entry]
+1: mov hostret=ip
+ mov r25=in1 // copy arguments
+ mov r26=in2
+ mov r27=in3
+ mov psrsave=psr
+ ;;
+ tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
+ tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
+ ;;
+ add hostret=2f-1b,hostret // calculate return address
+ add entry=entry,in0
+ ;;
+ rsm psr.i | psr.ic
+ ;;
+ srlz.i
+ mov b6=entry
+ br.cond.sptk b6 // call the service
+2:
+ // Architectural sequence for enabling interrupts if necessary
+(p7) ssm psr.ic
+ ;;
+(p7) srlz.i
+ ;;
+//(p6) ssm psr.i
+ ;;
+ mov rp=rpsave
+ mov ar.pfs=pfssave
+ mov r8=r31
+ ;;
+ srlz.d
+ br.ret.sptk rp
+
+END(ia64_call_vsa)
+
+#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
+
+GLOBAL_ENTRY(vmm_reset_entry)
+ //set up ipsr, iip, vpd.vpsr, dcr
+ // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
+ // For DCR: all bits 0
+ adds r14=-VMM_PT_REGS_SIZE, r12
+ ;;
+ movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
+ movl r10=0x8000000000000000
+ adds r16=PT(CR_IIP), r14
+ adds r20=PT(R1), r14
+ ;;
+ rsm psr.ic | psr.i
+ ;;
+ srlz.i
+ ;;
+ bsw.0
+ ;;
+ mov r21 =r13
+ ;;
+ bsw.1
+ ;;
+ mov ar.rsc = 0
+ ;;
+ flushrs
+ ;;
+ mov ar.bspstore = 0
+ // clear BSPSTORE
+ ;;
+ mov cr.ipsr=r6
+ mov cr.ifs=r10
+ ld8 r4 = [r16] // Set init iip for first run.
+ ld8 r1 = [r20]
+ ;;
+ mov cr.iip=r4
+ ;;
+ adds r16=VMM_VPD_BASE_OFFSET,r13
+ adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
+ ;;
+ ld8 r18=[r16]
+ ld8 r20=[r20]
+ ;;
+ adds r19=VMM_VPD_VPSR_OFFSET,r18
+ ;;
+ ld8 r19=[r19]
+ mov r17=r0
+ mov r22=r0
+ mov r23=r0
+ br.cond.sptk ia64_vmm_entry
+ br.ret.sptk b0
+END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
new file mode 100644
index 000000000000..f6c5617e16af
--- /dev/null
+++ b/arch/ia64/kvm/vti.h
@@ -0,0 +1,290 @@
+/*
+ * vti.h: prototypes for the general VT-i related interfaces
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Fred Yang (fred.yang@intel.com)
+ * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Zhang xiantao <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+#ifndef _KVM_VT_I_H
+#define _KVM_VT_I_H
+
+#ifndef __ASSEMBLY__
+#include <asm/page.h>
+
+#include <linux/kvm_host.h>
+
+/* define itr.i and itr.d in ia64_itr function */
+#define ITR 0x01
+#define DTR 0x02
+#define IaDTR 0x03
+
+#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
+#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
+
+#define RR6 (6UL<<61)
+#define RR7 (7UL<<61)
+
+
+/* config_options in pal_vp_init_env */
+#define VP_INITIALIZE 1UL
+#define VP_FR_PMC (1UL << 1)
+#define VP_OPCODE (1UL << 8)
+#define VP_CAUSE (1UL << 9)
+#define VP_FW_ACC (1UL << 63)
+
+/* Init the vp environment, also initializing the vm_buffer */
+#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
+ VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+/* Init the vp environment without initializing the vm_buffer */
+#define VP_INIT_ENV (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+
+#define PAL_VP_CREATE 265
+/* Stacked Virt. Initializes a new VPD for the operation of
+ * a new virtual processor in the virtual environment.
+ */
+#define PAL_VP_ENV_INFO 266
+/* Stacked Virt. Returns the parameters needed to enter a virtual environment. */
+#define PAL_VP_EXIT_ENV 267
+/* Stacked Virt. Allows a logical processor to exit a virtual environment. */
+#define PAL_VP_INIT_ENV 268
+/* Stacked Virt. Allows a logical processor to enter a virtual environment. */
+#define PAL_VP_REGISTER 269
+/* Stacked Virt. Registers a different host IVT for the virtual processor. */
+#define PAL_VP_RESUME 270
+#define PAL_VP_RESTORE 270 /* Renamed from PAL_VP_RESUME */
+/* Stacked Virt. Resumes virtual processor operation on the logical processor. */
+#define PAL_VP_SUSPEND 271
+#define PAL_VP_SAVE 271 /* Renamed from PAL_VP_SUSPEND */
+/* Stacked Virt. Suspends operation for the specified virtual processor on
+ * the logical processor.
+ */
+#define PAL_VP_TERMINATE 272
+/* Stacked Virt. Terminates operation for the specified virtual processor. */
+
+union vac {
+ unsigned long value;
+ struct {
+ int a_int:1;
+ int a_from_int_cr:1;
+ int a_to_int_cr:1;
+ int a_from_psr:1;
+ int a_from_cpuid:1;
+ int a_cover:1;
+ int a_bsw:1;
+ long reserved:57;
+ };
+};
+
+union vdc {
+ unsigned long value;
+ struct {
+ int d_vmsw:1;
+ int d_extint:1;
+ int d_ibr_dbr:1;
+ int d_pmc:1;
+ int d_to_pmd:1;
+ int d_itm:1;
+ long reserved:58;
+ };
+};
+
+struct vpd {
+ union vac vac;
+ union vdc vdc;
+ unsigned long virt_env_vaddr;
+ unsigned long reserved1[29];
+ unsigned long vhpi;
+ unsigned long reserved2[95];
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long reserved3[11];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long reserved4[76];
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr;
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta;
+ unsigned long rsv2[7];
+ unsigned long ipsr;
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim;
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid;
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv;
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0;
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
+ unsigned long reserved5[128];
+ unsigned long reserved6[3456];
+ unsigned long vmm_avail[128];
+ unsigned long reserved7[4096];
+};
+
+#define PAL_PROC_VM_BIT (1UL << 40)
+#define PAL_PROC_VMSW_BIT (1UL << 54)
+
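+/*
+ * Thin C wrappers around the stacked-convention PAL virtualization
+ * services below. A minimal bring-up sketch (vm_buffer is a
+ * hypothetical, already-allocated host buffer):
+ *
+ *	u64 buf_size, vp_env_info, vsa_base;
+ *
+ *	if (ia64_pal_vp_env_info(&buf_size, &vp_env_info) != 0)
+ *		return -EINVAL;
+ *	if (ia64_pal_vp_init_env(VP_INIT_ENV_INITALIZE,
+ *			__pa(vm_buffer), (u64)vm_buffer, &vsa_base) != 0)
+ *		return -EINVAL;
+ */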
+static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
+ u64 *vp_env_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
+ *buffer_size = iprv.v0;
+ *vp_env_info = iprv.v1;
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_exit_env(u64 iva)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
+ u64 vbase_addr, u64 *vsa_base)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
+ vbase_addr);
+ *vsa_base = iprv.v0;
+
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
+
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
+
+ return iprv.status;
+}
+
+#endif
+
+/*VPD field offset*/
+#define VPD_VAC_START_OFFSET 0
+#define VPD_VDC_START_OFFSET 8
+#define VPD_VHPI_START_OFFSET 256
+#define VPD_VGR_START_OFFSET 1024
+#define VPD_VBGR_START_OFFSET 1152
+#define VPD_VNAT_START_OFFSET 1280
+#define VPD_VBNAT_START_OFFSET 1288
+#define VPD_VCPUID_START_OFFSET 1296
+#define VPD_VPSR_START_OFFSET 1424
+#define VPD_VPR_START_OFFSET 1432
+#define VPD_VRSE_CFLE_START_OFFSET 1440
+#define VPD_VCR_START_OFFSET 2048
+#define VPD_VTPR_START_OFFSET 2576
+#define VPD_VRR_START_OFFSET 3072
+#define VPD_VMM_VAIL_START_OFFSET 31744
+
+/*Virtualization faults*/
+
+#define EVENT_MOV_TO_AR 1
+#define EVENT_MOV_TO_AR_IMM 2
+#define EVENT_MOV_FROM_AR 3
+#define EVENT_MOV_TO_CR 4
+#define EVENT_MOV_FROM_CR 5
+#define EVENT_MOV_TO_PSR 6
+#define EVENT_MOV_FROM_PSR 7
+#define EVENT_ITC_D 8
+#define EVENT_ITC_I 9
+#define EVENT_MOV_TO_RR 10
+#define EVENT_MOV_TO_DBR 11
+#define EVENT_MOV_TO_IBR 12
+#define EVENT_MOV_TO_PKR 13
+#define EVENT_MOV_TO_PMC 14
+#define EVENT_MOV_TO_PMD 15
+#define EVENT_ITR_D 16
+#define EVENT_ITR_I 17
+#define EVENT_MOV_FROM_RR 18
+#define EVENT_MOV_FROM_DBR 19
+#define EVENT_MOV_FROM_IBR 20
+#define EVENT_MOV_FROM_PKR 21
+#define EVENT_MOV_FROM_PMC 22
+#define EVENT_MOV_FROM_CPUID 23
+#define EVENT_SSM 24
+#define EVENT_RSM 25
+#define EVENT_PTC_L 26
+#define EVENT_PTC_G 27
+#define EVENT_PTC_GA 28
+#define EVENT_PTR_D 29
+#define EVENT_PTR_I 30
+#define EVENT_THASH 31
+#define EVENT_TTAG 32
+#define EVENT_TPA 33
+#define EVENT_TAK 34
+#define EVENT_PTC_E 35
+#define EVENT_COVER 36
+#define EVENT_RFI 37
+#define EVENT_BSW_0 38
+#define EVENT_BSW_1 39
+#define EVENT_VMSW 40
+
+/**PAL virtual services offsets */
+#define PAL_VPS_RESUME_NORMAL 0x0000
+#define PAL_VPS_RESUME_HANDLER 0x0400
+#define PAL_VPS_SYNC_READ 0x0800
+#define PAL_VPS_SYNC_WRITE 0x0c00
+#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
+#define PAL_VPS_THASH 0x1400
+#define PAL_VPS_TTAG 0x1800
+#define PAL_VPS_RESTORE 0x1c00
+#define PAL_VPS_SAVE 0x2000
+
+#endif /* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
new file mode 100644
index 000000000000..def4576d22b1
--- /dev/null
+++ b/arch/ia64/kvm/vtlb.c
@@ -0,0 +1,636 @@
+/*
+ * vtlb.c: guest virtual tlb handling module.
+ * Copyright (c) 2004, Intel Corporation.
+ * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include "vcpu.h"
+
+#include <linux/rwsem.h>
+
+#include <asm/tlb.h>
+
+/*
+ * Check to see if the address rid:va is translated by the TLB
+ */
+
+static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
+{
+ return ((trp->p) && (trp->rid == rid)
+ && ((va-trp->vadr) < PSIZE(trp->ps)));
+}
+
+/*
+ * Only for GUEST TR format.
+ */
+static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
+{
+ u64 sa1, ea1;
+
+ if (!trp->p || trp->rid != rid)
+ return 0;
+
+ sa1 = trp->vadr;
+ ea1 = sa1 + PSIZE(trp->ps) - 1;
+ eva -= 1;
+ if ((sva > ea1) || (sa1 > eva))
+ return 0;
+ else
+ return 1;
+
+}
+
+void machine_tlb_purge(u64 va, u64 ps)
+{
+ ia64_ptcl(va, ps << 2);
+}
+
+void local_flush_tlb_all(void)
+{
+ int i, j;
+ unsigned long flags, count0, count1;
+ unsigned long stride0, stride1, addr;
+
+ addr = current_vcpu->arch.ptce_base;
+ count0 = current_vcpu->arch.ptce_count[0];
+ count1 = current_vcpu->arch.ptce_count[1];
+ stride0 = current_vcpu->arch.ptce_stride[0];
+ stride1 = current_vcpu->arch.ptce_stride[1];
+
+ local_irq_save(flags);
+ for (i = 0; i < count0; ++i) {
+ for (j = 0; j < count1; ++j) {
+ ia64_ptce(addr);
+ addr += stride1;
+ }
+ addr += stride0;
+ }
+ local_irq_restore(flags);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
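+/*
+ * Decide whether a VHPT walk is architecturally enabled for this
+ * reference: both the region's rr.ve and pta.ve must be set, and the
+ * psr translation bits relevant to the reference type must be on.
+ */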
+int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
+{
+ union ia64_rr vrr;
+ union ia64_pta vpta;
+ struct ia64_psr vpsr;
+
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ vpta.val = vcpu_get_pta(vcpu);
+
+ if (vrr.ve & vpta.ve) {
+ switch (ref) {
+ case DATA_REF:
+ case NA_REF:
+ return vpsr.dt;
+ case INST_REF:
+ return vpsr.dt && vpsr.it && vpsr.ic;
+ case RSE_REF:
+ return vpsr.dt && vpsr.rt;
+
+ }
+ }
+ return 0;
+}
+
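+/*
+ * Hash rid:va into the guest hash table the same way the long-format
+ * VHPT hash would: the low rid byte and the page-frame bits form the
+ * bucket index, the remaining rid/pfn bits form the tag. Entries are
+ * 32 bytes, hence the "<< 5" and the "size - 5" in the index width.
+ */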
+struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
+{
+ u64 index, pfn, rid, pfn_bits;
+
+ pfn_bits = vpta.size - 5 - 8;
+ pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
+ rid = _REGION_ID(vrr);
+ index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
+ *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
+
+ return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
+ (index << 5));
+}
+
+struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
+{
+
+ struct thash_data *trp;
+ int i;
+ u64 rid;
+
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ if (type == D_TLB) {
+ if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+ i < NDTRS; i++, trp++) {
+ if (__is_tr_translated(trp, rid, va))
+ return trp;
+ }
+ }
+ } else {
+ if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+ i < NITRS; i++, trp++) {
+ if (__is_tr_translated(trp, rid, va))
+ return trp;
+ }
+ }
+ }
+
+ return NULL;
+}
+
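+/*
+ * Insert an entry into the machine VHPT. The tag is invalidated first
+ * and rewritten only after the other fields, with a memory fence in
+ * between, so a concurrent hardware walker never sees a half-written
+ * entry.
+ */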
+static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
+{
+ union ia64_rr rr;
+ struct thash_data *head;
+ unsigned long ps, gpaddr;
+
+ ps = itir_ps(itir);
+
+ gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+ (ifa & ((1UL << ps) - 1));
+
+ rr.val = ia64_get_rr(ifa);
+ head = (struct thash_data *)ia64_thash(ifa);
+ head->etag = INVALID_TI_TAG;
+ ia64_mf();
+ head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
+ head->itir = rr.ps << 2;
+ head->etag = ia64_ttag(ifa);
+ head->gpaddr = gpaddr;
+}
+
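+/*
+ * Set the dirty-log bits for every guest page covered by this pte.
+ * The bitmap address is derived from the vcpu pointer because the VMM
+ * runs in its own mapping of the kvm structures; the spinlock guards
+ * against concurrent updates from other vcpus.
+ */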
+void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
+{
+ u64 i, dirty_pages = 1;
+ u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
+ spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+ void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
+ + KVM_MEM_DIRTY_LOG_OFS;
+ dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
+
+ vmm_spin_lock(lock);
+ for (i = 0; i < dirty_pages; i++) {
+ /* avoid RMW */
+ if (!test_bit(base_gfn + i, dirty_bitmap))
+ set_bit(base_gfn + i , dirty_bitmap);
+ }
+ vmm_spin_unlock(lock);
+}
+
+void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
+{
+ u64 phy_pte, psr;
+ union ia64_rr mrr;
+
+ mrr.val = ia64_get_rr(va);
+ phy_pte = translate_phy_pte(&pte, itir, va);
+
+ if (itir_ps(itir) >= mrr.ps) {
+ vhpt_insert(phy_pte, itir, va, pte);
+ } else {
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ psr = ia64_clear_ic();
+ ia64_itc(type, va, phy_pte, itir_ps(itir));
+ ia64_set_psr(psr);
+ }
+
+ if (!(pte&VTLB_PTE_IO))
+ mark_pages_dirty(v, pte, itir_ps(itir));
+}
+
+/*
+ * vhpt lookup
+ */
+struct thash_data *vhpt_lookup(u64 va)
+{
+ struct thash_data *head;
+ u64 tag;
+
+ head = (struct thash_data *)ia64_thash(va);
+ tag = ia64_ttag(va);
+ if (head->etag == tag)
+ return head;
+ return NULL;
+}
+
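+/*
+ * Walk the guest's VHPT at iha on the guest's behalf. The speculative
+ * ld8.s plus tnat.nz detects an unmapped guest VHPT page and returns 1
+ * so the caller can deliver the appropriate fault; otherwise the entry
+ * is stored through *pte and 0 is returned.
+ */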
+u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+{
+ u64 ret;
+ struct thash_data *data;
+
+ data = __vtr_lookup(current_vcpu, iha, D_TLB);
+ if (data != NULL)
+ thash_vhpt_insert(current_vcpu, data->page_flags,
+ data->itir, iha, D_TLB);
+
+ asm volatile ("rsm psr.ic|psr.i;;"
+ "srlz.d;;"
+ "ld8.s r9=[%1];;"
+ "tnat.nz p6,p7=r9;;"
+ "(p6) mov %0=1;"
+ "(p6) mov r9=r0;"
+ "(p7) extr.u r9=r9,0,53;;"
+ "(p7) mov %0=r0;"
+ "(p7) st8 [%2]=r9;;"
+ "ssm psr.ic;;"
+ "srlz.d;;"
+ /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+ : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+
+ return ret;
+}
+
+/*
+ * Purge entries from the software guest TLB (VTLB). The per-region
+ * psbits bitmap records which page sizes have been inserted there,
+ * so only hash chains for those sizes need to be walked.
+ */
+
+static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ struct thash_data *cur;
+ u64 start, curadr, size, psbits, tag, rr_ps, num;
+ union ia64_rr vrr;
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ vrr.val = vcpu_get_rr(v, va);
+ psbits = VMX(v, psbits[(va >> 61)]);
+ start = va & ~((1UL << ps) - 1);
+ while (psbits) {
+ curadr = start;
+ rr_ps = __ffs(psbits);
+ psbits &= ~(1UL << rr_ps);
+ num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
+ size = PSIZE(rr_ps);
+ vrr.ps = rr_ps;
+ while (num) {
+ cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
+ if (cur->etag == tag && cur->ps == rr_ps)
+ cur->etag = INVALID_TI_TAG;
+ curadr += size;
+ num--;
+ }
+ }
+}
+
+
+/*
+ * purge VHPT and machine TLB
+ */
+static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ struct thash_data *cur;
+ u64 start, size, tag, num;
+ union ia64_rr rr;
+
+ start = va & ~((1UL << ps) - 1);
+ rr.val = ia64_get_rr(va);
+ size = PSIZE(rr.ps);
+ num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
+ while (num) {
+ cur = (struct thash_data *)ia64_thash(start);
+ tag = ia64_ttag(start);
+ if (cur->etag == tag)
+ cur->etag = INVALID_TI_TAG;
+ start += size;
+ num--;
+ }
+ machine_tlb_purge(va, ps);
+}
+
+/*
+ * Insert an entry into the hash TLB or VHPT.
+ * NOTES:
+ * 1: When inserting a VHPT entry into the thash, "va" must be an
+ * address covered by the inserted machine VHPT entry.
+ * 2: The entry is always in TLB format.
+ * 3: The caller must make sure the new entry does not overlap
+ * with any existing entry.
+ */
+void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
+{
+ struct thash_data *head;
+ union ia64_rr vrr;
+ u64 tag;
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ vrr.val = vcpu_get_rr(v, va);
+ vrr.ps = itir_ps(itir);
+ VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
+ head = vsa_thash(hcb->pta, va, vrr.val, &tag);
+ head->page_flags = pte;
+ head->itir = itir;
+ head->etag = tag;
+}
+
+int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
+{
+ struct thash_data *trp;
+ int i;
+ u64 end, rid;
+
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ end = va + PSIZE(ps);
+ if (type == D_TLB) {
+ if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+ i < NDTRS; i++, trp++) {
+ if (__is_tr_overlap(trp, rid, va, end))
+ return i;
+ }
+ }
+ } else {
+ if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+ i < NITRS; i++, trp++) {
+ if (__is_tr_overlap(trp, rid, va, end))
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+/*
+ * Purge entries in VTLB and VHPT
+ */
+void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ if (vcpu_quick_region_check(v->arch.tc_regions, va))
+ vtlb_purge(v, va, ps);
+ vhpt_purge(v, va, ps);
+}
+
+void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ u64 old_va = va;
+ va = REGION_OFFSET(va);
+ if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
+ vtlb_purge(v, va, ps);
+ vhpt_purge(v, va, ps);
+}
+
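+/*
+ * Translate a guest pte into a machine pte: look up the machine page
+ * behind the guest physical address (kvm_lookup_mpa) and splice its
+ * ppn into the pte. For MMIO ranges the pte is tagged VTLB_PTE_IO and
+ * -1 is returned so the caller can route the access to the I/O
+ * emulator.
+ */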
+u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
+{
+ u64 ps, ps_mask, paddr, maddr;
+ union pte_flags phy_pte;
+
+ ps = itir_ps(itir);
+ ps_mask = ~((1UL << ps) - 1);
+ phy_pte.val = *pte;
+ paddr = *pte;
+ paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
+ maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
+ if (maddr & GPFN_IO_MASK) {
+ *pte |= VTLB_PTE_IO;
+ return -1;
+ }
+ maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
+ (paddr & ~PAGE_MASK);
+ phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+ return phy_pte.val;
+}
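
To make the masking above concrete (a hypothetical example with ps = 14, i.e. a 16KB guest page): ps_mask keeps the page-frame bits of the guest PTE while ~ps_mask keeps the in-page offset from va, so

	/* ps = 14: ps_mask == ~0x3fff
	 * paddr = (guest PPN bits & ~0x3fff) | (va & 0x3fff)
	 * maddr = host frame from the p2m table, recombined with
	 *	   paddr's offset within the host PAGE_SIZE page
	 */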
+
+/*
+ * Purge overlapping TC entries, then insert the new one, to emulate
+ * itc ops.
+ * Notes: Only TC entries can be purged and inserted this way.
+ * A return value of 1 indicates the entry maps MMIO.
+ */
+int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+ u64 ifa, int type)
+{
+ u64 ps;
+ u64 phy_pte;
+ union ia64_rr vrr, mrr;
+ int ret = 0;
+
+ ps = itir_ps(itir);
+ vrr.val = vcpu_get_rr(v, ifa);
+ mrr.val = ia64_get_rr(ifa);
+
+ phy_pte = translate_phy_pte(&pte, itir, ifa);
+
+ /* Ensure WB attribute if pte is related to a normal mem page,
+ * which is required by vga acceleration since qemu maps shared
+ * vram buffer with WB.
+ */
+ if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+ pte &= ~_PAGE_MA_MASK;
+ phy_pte &= ~_PAGE_MA_MASK;
+ }
+
+ if (pte & VTLB_PTE_IO)
+ ret = 1;
+
+ vtlb_purge(v, ifa, ps);
+ vhpt_purge(v, ifa, ps);
+
+ if (ps == mrr.ps) {
+ if (!(pte&VTLB_PTE_IO)) {
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ } else {
+ vtlb_insert(v, pte, itir, ifa);
+ vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+ }
+ } else if (ps > mrr.ps) {
+ vtlb_insert(v, pte, itir, ifa);
+ vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+ if (!(pte&VTLB_PTE_IO))
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ } else {
+ u64 psr;
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ psr = ia64_clear_ic();
+ ia64_itc(type, ifa, phy_pte, ps);
+ ia64_set_psr(psr);
+ }
+ if (!(pte&VTLB_PTE_IO))
+ mark_pages_dirty(v, pte, ps);
+
+ return ret;
+}
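
Summarizing the three insert paths above (guest page size ps versus machine page size mrr.ps):

	/* ps == mrr.ps: sizes match; normal memory goes straight into
	 *		 the VHPT, MMIO is tracked in the VTLB only.
	 * ps >  mrr.ps: the large guest page is tracked in the VTLB
	 *		 and, for normal memory, also seeded into the VHPT.
	 * ps <  mrr.ps: the guest page is smaller than the machine page,
	 *		 so the translation is inserted directly into the
	 *		 machine TLB with itc.
	 */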
+
+/*
+ * Purge all TC and VHPT entries, including those in the hash tables.
+ */
+
+void thash_purge_all(struct kvm_vcpu *v)
+{
+ int i;
+ struct thash_data *head;
+ struct thash_cb *vtlb, *vhpt;
+ vtlb = &v->arch.vtlb;
+ vhpt = &v->arch.vhpt;
+
+ for (i = 0; i < 8; i++)
+ VMX(v, psbits[i]) = 0;
+
+ head = vtlb->hash;
+ for (i = 0; i < vtlb->num; i++) {
+ head->page_flags = 0;
+ head->etag = INVALID_TI_TAG;
+ head->itir = 0;
+ head->next = 0;
+ head++;
+ }
+
+ head = vhpt->hash;
+ for (i = 0; i < vhpt->num; i++) {
+ head->page_flags = 0;
+ head->etag = INVALID_TI_TAG;
+ head->itir = 0;
+ head->next = 0;
+ head++;
+ }
+
+ local_flush_tlb_all();
+}
+
+
+/*
+ * Look up the hash table and its collision chain to find an entry
+ * covering this address (rid:va).
+ *
+ * INPUT:
+ * in: TLB format, for both VHPT and TLB.
+ */
+
+struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
+{
+ struct thash_data *cch;
+ u64 psbits, ps, tag;
+ union ia64_rr vrr;
+
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ cch = __vtr_lookup(v, va, is_data);
+ if (cch)
+ return cch;
+
+ if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
+ return NULL;
+
+ psbits = VMX(v, psbits[(va >> 61)]);
+ vrr.val = vcpu_get_rr(v, va);
+ while (psbits) {
+ ps = __ffs(psbits);
+ psbits &= ~(1UL << ps);
+ vrr.ps = ps;
+ cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
+ if (cch->etag == tag && cch->ps == ps)
+ return cch;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * Initialize internal control data before the hash table is put
+ * into service.
+ */
+void thash_init(struct thash_cb *hcb, u64 sz)
+{
+ int i;
+ struct thash_data *head;
+
+ hcb->pta.val = (unsigned long)hcb->hash;
+ hcb->pta.vf = 1;
+ hcb->pta.ve = 1;
+ hcb->pta.size = sz;
+ head = hcb->hash;
+ for (i = 0; i < hcb->num; i++) {
+ head->page_flags = 0;
+ head->itir = 0;
+ head->etag = INVALID_TI_TAG;
+ head->next = 0;
+ head++;
+ }
+}
+
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+ u64 *base = (u64 *) KVM_P2M_BASE;
+ return *(base + gpfn);
+}
+
+u64 kvm_gpa_to_mpa(u64 gpa)
+{
+ u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
+ return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
+}
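
Since PAGE_MASK is ~(PAGE_SIZE - 1), the shift pair above merely clears the in-page offset bits; an equivalent formulation, shown only for clarity:

	/* return (kvm_lookup_mpa(gpa >> PAGE_SHIFT) & PAGE_MASK) |
	 *	  (gpa & ~PAGE_MASK);
	 */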
+
+
+/*
+ * Fetch guest bundle code.
+ * INPUT:
+ * gip: guest ip
+ * pbundle: used to return the fetched bundle.
+ */
+int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
+{
+ u64 gpip = 0; /* guest physical IP */
+ u64 *vpa;
+ struct thash_data *tlb;
+ u64 maddr;
+
+ if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
+ /* I-side physical mode */
+ gpip = gip;
+ } else {
+ tlb = vtlb_lookup(vcpu, gip, I_TLB);
+ if (tlb)
+ gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
+ (gip & (PSIZE(tlb->ps) - 1));
+ }
+ if (gpip) {
+ maddr = kvm_gpa_to_mpa(gpip);
+ } else {
+ tlb = vhpt_lookup(gip);
+ if (tlb == NULL) {
+ ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
+ return IA64_FAULT;
+ }
+ maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
+ | (gip & (PSIZE(tlb->ps) - 1));
+ }
+ vpa = (u64 *)__kvm_va(maddr);
+
+ pbundle->i64[0] = *vpa++;
+ pbundle->i64[1] = *vpa;
+
+ return IA64_NO_FAULT;
+}
+
+
+void kvm_init_vhpt(struct kvm_vcpu *v)
+{
+ v->arch.vhpt.num = VHPT_NUM_ENTRIES;
+ thash_init(&v->arch.vhpt, VHPT_SHIFT);
+ ia64_set_pta(v->arch.vhpt.pta.val);
+ /* Enable VHPT here? */
+}
+
+void kvm_init_vtlb(struct kvm_vcpu *v)
+{
+ v->arch.vtlb.num = VTLB_NUM_ENTRIES;
+ thash_init(&v->arch.vtlb, VTLB_SHIFT);
+}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 5c1de53c8c1c..fc6c6636ffda 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -682,15 +682,6 @@ mem_init (void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 2b412454cb41..ded7dd2f67b2 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -186,7 +186,7 @@ int setup_irq(unsigned int irq, struct irq_node *node)
if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
printk("%s: Incorrect IRQ %d from %s\n",
- __FUNCTION__, irq, node->devname);
+ __func__, irq, node->devname);
return -ENXIO;
}
@@ -249,7 +249,7 @@ void free_irq(unsigned int irq, void *dev_id)
unsigned long flags;
if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
- printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+ printk("%s: Incorrect IRQ %d\n", __func__, irq);
return;
}
@@ -267,7 +267,7 @@ void free_irq(unsigned int irq, void *dev_id)
node->handler = NULL;
} else
printk("%s: Removing probably wrong IRQ %d\n",
- __FUNCTION__, irq);
+ __func__, irq);
if (!irq_list[irq]) {
if (contr->shutdown)
@@ -288,7 +288,7 @@ void enable_irq(unsigned int irq)
if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
printk("%s: Incorrect IRQ %d\n",
- __FUNCTION__, irq);
+ __func__, irq);
return;
}
@@ -312,7 +312,7 @@ void disable_irq(unsigned int irq)
if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
printk("%s: Incorrect IRQ %d\n",
- __FUNCTION__, irq);
+ __func__, irq);
return;
}
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 50603d3dce84..3c943d2ec570 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -190,7 +190,7 @@ void oss_irq_enable(int irq) {
break;
#ifdef DEBUG_IRQUSE
default:
- printk("%s unknown irq %d\n",__FUNCTION__, irq);
+ printk("%s unknown irq %d\n", __func__, irq);
break;
#endif
}
@@ -230,7 +230,7 @@ void oss_irq_disable(int irq) {
break;
#ifdef DEBUG_IRQUSE
default:
- printk("%s unknown irq %d\n", __FUNCTION__, irq);
+ printk("%s unknown irq %d\n", __func__, irq);
break;
#endif
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index f42caa79e4e8..a2bb01f59642 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -79,7 +79,6 @@ void show_mem(void)
printk("\nMem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page = pgdat->node_mem_map + i;
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 46161cef08b9..9f0e3d59bf92 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -47,7 +47,7 @@ static int q40_irq_startup(unsigned int irq)
switch (irq) {
case 1: case 2: case 8: case 9:
case 11: case 12: case 13:
- printk("%s: ISA IRQ %d not implemented by HW\n", __FUNCTION__, irq);
+ printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
return -ENXIO;
}
return 0;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca136298acdc..5bf03b3c4150 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,252 +17,252 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
-#define text(t) __asm__("\n@@@" t)
+#define text(t) __asm__("\n->#" t)
#define _offset(type, member) (&(((type *)NULL)->member))
#define offset(string, ptr, member) \
- __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
+ __asm__("\n->" string " %0" : : "i" (_offset(ptr, member)))
#define constant(string, member) \
- __asm__("\n@@@" string "%X0" : : "ri" (member))
+ __asm__("\n->" string " %0" : : "ri" (member))
#define size(string, size) \
- __asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
+ __asm__("\n->" string " %0" : : "i" (sizeof(size)))
#define linefeed text("")
void output_ptreg_defines(void)
{
- text("/* MIPS pt_regs offsets. */");
- offset("#define PT_R0 ", struct pt_regs, regs[0]);
- offset("#define PT_R1 ", struct pt_regs, regs[1]);
- offset("#define PT_R2 ", struct pt_regs, regs[2]);
- offset("#define PT_R3 ", struct pt_regs, regs[3]);
- offset("#define PT_R4 ", struct pt_regs, regs[4]);
- offset("#define PT_R5 ", struct pt_regs, regs[5]);
- offset("#define PT_R6 ", struct pt_regs, regs[6]);
- offset("#define PT_R7 ", struct pt_regs, regs[7]);
- offset("#define PT_R8 ", struct pt_regs, regs[8]);
- offset("#define PT_R9 ", struct pt_regs, regs[9]);
- offset("#define PT_R10 ", struct pt_regs, regs[10]);
- offset("#define PT_R11 ", struct pt_regs, regs[11]);
- offset("#define PT_R12 ", struct pt_regs, regs[12]);
- offset("#define PT_R13 ", struct pt_regs, regs[13]);
- offset("#define PT_R14 ", struct pt_regs, regs[14]);
- offset("#define PT_R15 ", struct pt_regs, regs[15]);
- offset("#define PT_R16 ", struct pt_regs, regs[16]);
- offset("#define PT_R17 ", struct pt_regs, regs[17]);
- offset("#define PT_R18 ", struct pt_regs, regs[18]);
- offset("#define PT_R19 ", struct pt_regs, regs[19]);
- offset("#define PT_R20 ", struct pt_regs, regs[20]);
- offset("#define PT_R21 ", struct pt_regs, regs[21]);
- offset("#define PT_R22 ", struct pt_regs, regs[22]);
- offset("#define PT_R23 ", struct pt_regs, regs[23]);
- offset("#define PT_R24 ", struct pt_regs, regs[24]);
- offset("#define PT_R25 ", struct pt_regs, regs[25]);
- offset("#define PT_R26 ", struct pt_regs, regs[26]);
- offset("#define PT_R27 ", struct pt_regs, regs[27]);
- offset("#define PT_R28 ", struct pt_regs, regs[28]);
- offset("#define PT_R29 ", struct pt_regs, regs[29]);
- offset("#define PT_R30 ", struct pt_regs, regs[30]);
- offset("#define PT_R31 ", struct pt_regs, regs[31]);
- offset("#define PT_LO ", struct pt_regs, lo);
- offset("#define PT_HI ", struct pt_regs, hi);
+ text("MIPS pt_regs offsets.");
+ offset("PT_R0", struct pt_regs, regs[0]);
+ offset("PT_R1", struct pt_regs, regs[1]);
+ offset("PT_R2", struct pt_regs, regs[2]);
+ offset("PT_R3", struct pt_regs, regs[3]);
+ offset("PT_R4", struct pt_regs, regs[4]);
+ offset("PT_R5", struct pt_regs, regs[5]);
+ offset("PT_R6", struct pt_regs, regs[6]);
+ offset("PT_R7", struct pt_regs, regs[7]);
+ offset("PT_R8", struct pt_regs, regs[8]);
+ offset("PT_R9", struct pt_regs, regs[9]);
+ offset("PT_R10", struct pt_regs, regs[10]);
+ offset("PT_R11", struct pt_regs, regs[11]);
+ offset("PT_R12", struct pt_regs, regs[12]);
+ offset("PT_R13", struct pt_regs, regs[13]);
+ offset("PT_R14", struct pt_regs, regs[14]);
+ offset("PT_R15", struct pt_regs, regs[15]);
+ offset("PT_R16", struct pt_regs, regs[16]);
+ offset("PT_R17", struct pt_regs, regs[17]);
+ offset("PT_R18", struct pt_regs, regs[18]);
+ offset("PT_R19", struct pt_regs, regs[19]);
+ offset("PT_R20", struct pt_regs, regs[20]);
+ offset("PT_R21", struct pt_regs, regs[21]);
+ offset("PT_R22", struct pt_regs, regs[22]);
+ offset("PT_R23", struct pt_regs, regs[23]);
+ offset("PT_R24", struct pt_regs, regs[24]);
+ offset("PT_R25", struct pt_regs, regs[25]);
+ offset("PT_R26", struct pt_regs, regs[26]);
+ offset("PT_R27", struct pt_regs, regs[27]);
+ offset("PT_R28", struct pt_regs, regs[28]);
+ offset("PT_R29", struct pt_regs, regs[29]);
+ offset("PT_R30", struct pt_regs, regs[30]);
+ offset("PT_R31", struct pt_regs, regs[31]);
+ offset("PT_LO", struct pt_regs, lo);
+ offset("PT_HI", struct pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
- offset("#define PT_ACX ", struct pt_regs, acx);
+ offset("PT_ACX", struct pt_regs, acx);
#endif
- offset("#define PT_EPC ", struct pt_regs, cp0_epc);
- offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
- offset("#define PT_STATUS ", struct pt_regs, cp0_status);
- offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
+ offset("PT_EPC", struct pt_regs, cp0_epc);
+ offset("PT_BVADDR", struct pt_regs, cp0_badvaddr);
+ offset("PT_STATUS", struct pt_regs, cp0_status);
+ offset("PT_CAUSE", struct pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
- offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
+ offset("PT_TCSTATUS", struct pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
- size("#define PT_SIZE ", struct pt_regs);
+ size("PT_SIZE", struct pt_regs);
linefeed;
}
void output_task_defines(void)
{
- text("/* MIPS task_struct offsets. */");
- offset("#define TASK_STATE ", struct task_struct, state);
- offset("#define TASK_THREAD_INFO ", struct task_struct, stack);
- offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_MM ", struct task_struct, mm);
- offset("#define TASK_PID ", struct task_struct, pid);
- size( "#define TASK_STRUCT_SIZE ", struct task_struct);
+ text("MIPS task_struct offsets.");
+ offset("TASK_STATE", struct task_struct, state);
+ offset("TASK_THREAD_INFO", struct task_struct, stack);
+ offset("TASK_FLAGS", struct task_struct, flags);
+ offset("TASK_MM", struct task_struct, mm);
+ offset("TASK_PID", struct task_struct, pid);
+ size( "TASK_STRUCT_SIZE", struct task_struct);
linefeed;
}
void output_thread_info_defines(void)
{
- text("/* MIPS thread_info offsets. */");
- offset("#define TI_TASK ", struct thread_info, task);
- offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain);
- offset("#define TI_FLAGS ", struct thread_info, flags);
- offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
- offset("#define TI_CPU ", struct thread_info, cpu);
- offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
- offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
- offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
- offset("#define TI_REGS ", struct thread_info, regs);
- constant("#define _THREAD_SIZE ", THREAD_SIZE);
- constant("#define _THREAD_MASK ", THREAD_MASK);
+ text("MIPS thread_info offsets.");
+ offset("TI_TASK", struct thread_info, task);
+ offset("TI_EXEC_DOMAIN", struct thread_info, exec_domain);
+ offset("TI_FLAGS", struct thread_info, flags);
+ offset("TI_TP_VALUE", struct thread_info, tp_value);
+ offset("TI_CPU", struct thread_info, cpu);
+ offset("TI_PRE_COUNT", struct thread_info, preempt_count);
+ offset("TI_ADDR_LIMIT", struct thread_info, addr_limit);
+ offset("TI_RESTART_BLOCK", struct thread_info, restart_block);
+ offset("TI_REGS", struct thread_info, regs);
+ constant("_THREAD_SIZE", THREAD_SIZE);
+ constant("_THREAD_MASK", THREAD_MASK);
linefeed;
}
void output_thread_defines(void)
{
- text("/* MIPS specific thread_struct offsets. */");
- offset("#define THREAD_REG16 ", struct task_struct, thread.reg16);
- offset("#define THREAD_REG17 ", struct task_struct, thread.reg17);
- offset("#define THREAD_REG18 ", struct task_struct, thread.reg18);
- offset("#define THREAD_REG19 ", struct task_struct, thread.reg19);
- offset("#define THREAD_REG20 ", struct task_struct, thread.reg20);
- offset("#define THREAD_REG21 ", struct task_struct, thread.reg21);
- offset("#define THREAD_REG22 ", struct task_struct, thread.reg22);
- offset("#define THREAD_REG23 ", struct task_struct, thread.reg23);
- offset("#define THREAD_REG29 ", struct task_struct, thread.reg29);
- offset("#define THREAD_REG30 ", struct task_struct, thread.reg30);
- offset("#define THREAD_REG31 ", struct task_struct, thread.reg31);
- offset("#define THREAD_STATUS ", struct task_struct,
+ text("MIPS specific thread_struct offsets.");
+ offset("THREAD_REG16", struct task_struct, thread.reg16);
+ offset("THREAD_REG17", struct task_struct, thread.reg17);
+ offset("THREAD_REG18", struct task_struct, thread.reg18);
+ offset("THREAD_REG19", struct task_struct, thread.reg19);
+ offset("THREAD_REG20", struct task_struct, thread.reg20);
+ offset("THREAD_REG21", struct task_struct, thread.reg21);
+ offset("THREAD_REG22", struct task_struct, thread.reg22);
+ offset("THREAD_REG23", struct task_struct, thread.reg23);
+ offset("THREAD_REG29", struct task_struct, thread.reg29);
+ offset("THREAD_REG30", struct task_struct, thread.reg30);
+ offset("THREAD_REG31", struct task_struct, thread.reg31);
+ offset("THREAD_STATUS", struct task_struct,
thread.cp0_status);
- offset("#define THREAD_FPU ", struct task_struct, thread.fpu);
+ offset("THREAD_FPU", struct task_struct, thread.fpu);
- offset("#define THREAD_BVADDR ", struct task_struct, \
+ offset("THREAD_BVADDR", struct task_struct, \
thread.cp0_badvaddr);
- offset("#define THREAD_BUADDR ", struct task_struct, \
+ offset("THREAD_BUADDR", struct task_struct, \
thread.cp0_baduaddr);
- offset("#define THREAD_ECODE ", struct task_struct, \
+ offset("THREAD_ECODE", struct task_struct, \
thread.error_code);
- offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
- offset("#define THREAD_TRAMP ", struct task_struct, \
+ offset("THREAD_TRAPNO", struct task_struct, thread.trap_no);
+ offset("THREAD_TRAMP", struct task_struct, \
thread.irix_trampoline);
- offset("#define THREAD_OLDCTX ", struct task_struct, \
+ offset("THREAD_OLDCTX", struct task_struct, \
thread.irix_oldctx);
linefeed;
}
void output_thread_fpu_defines(void)
{
- offset("#define THREAD_FPR0 ",
+ offset("THREAD_FPR0",
struct task_struct, thread.fpu.fpr[0]);
- offset("#define THREAD_FPR1 ",
+ offset("THREAD_FPR1",
struct task_struct, thread.fpu.fpr[1]);
- offset("#define THREAD_FPR2 ",
+ offset("THREAD_FPR2",
struct task_struct, thread.fpu.fpr[2]);
- offset("#define THREAD_FPR3 ",
+ offset("THREAD_FPR3",
struct task_struct, thread.fpu.fpr[3]);
- offset("#define THREAD_FPR4 ",
+ offset("THREAD_FPR4",
struct task_struct, thread.fpu.fpr[4]);
- offset("#define THREAD_FPR5 ",
+ offset("THREAD_FPR5",
struct task_struct, thread.fpu.fpr[5]);
- offset("#define THREAD_FPR6 ",
+ offset("THREAD_FPR6",
struct task_struct, thread.fpu.fpr[6]);
- offset("#define THREAD_FPR7 ",
+ offset("THREAD_FPR7",
struct task_struct, thread.fpu.fpr[7]);
- offset("#define THREAD_FPR8 ",
+ offset("THREAD_FPR8",
struct task_struct, thread.fpu.fpr[8]);
- offset("#define THREAD_FPR9 ",
+ offset("THREAD_FPR9",
struct task_struct, thread.fpu.fpr[9]);
- offset("#define THREAD_FPR10 ",
+ offset("THREAD_FPR10",
struct task_struct, thread.fpu.fpr[10]);
- offset("#define THREAD_FPR11 ",
+ offset("THREAD_FPR11",
struct task_struct, thread.fpu.fpr[11]);
- offset("#define THREAD_FPR12 ",
+ offset("THREAD_FPR12",
struct task_struct, thread.fpu.fpr[12]);
- offset("#define THREAD_FPR13 ",
+ offset("THREAD_FPR13",
struct task_struct, thread.fpu.fpr[13]);
- offset("#define THREAD_FPR14 ",
+ offset("THREAD_FPR14",
struct task_struct, thread.fpu.fpr[14]);
- offset("#define THREAD_FPR15 ",
+ offset("THREAD_FPR15",
struct task_struct, thread.fpu.fpr[15]);
- offset("#define THREAD_FPR16 ",
+ offset("THREAD_FPR16",
struct task_struct, thread.fpu.fpr[16]);
- offset("#define THREAD_FPR17 ",
+ offset("THREAD_FPR17",
struct task_struct, thread.fpu.fpr[17]);
- offset("#define THREAD_FPR18 ",
+ offset("THREAD_FPR18",
struct task_struct, thread.fpu.fpr[18]);
- offset("#define THREAD_FPR19 ",
+ offset("THREAD_FPR19",
struct task_struct, thread.fpu.fpr[19]);
- offset("#define THREAD_FPR20 ",
+ offset("THREAD_FPR20",
struct task_struct, thread.fpu.fpr[20]);
- offset("#define THREAD_FPR21 ",
+ offset("THREAD_FPR21",
struct task_struct, thread.fpu.fpr[21]);
- offset("#define THREAD_FPR22 ",
+ offset("THREAD_FPR22",
struct task_struct, thread.fpu.fpr[22]);
- offset("#define THREAD_FPR23 ",
+ offset("THREAD_FPR23",
struct task_struct, thread.fpu.fpr[23]);
- offset("#define THREAD_FPR24 ",
+ offset("THREAD_FPR24",
struct task_struct, thread.fpu.fpr[24]);
- offset("#define THREAD_FPR25 ",
+ offset("THREAD_FPR25",
struct task_struct, thread.fpu.fpr[25]);
- offset("#define THREAD_FPR26 ",
+ offset("THREAD_FPR26",
struct task_struct, thread.fpu.fpr[26]);
- offset("#define THREAD_FPR27 ",
+ offset("THREAD_FPR27",
struct task_struct, thread.fpu.fpr[27]);
- offset("#define THREAD_FPR28 ",
+ offset("THREAD_FPR28",
struct task_struct, thread.fpu.fpr[28]);
- offset("#define THREAD_FPR29 ",
+ offset("THREAD_FPR29",
struct task_struct, thread.fpu.fpr[29]);
- offset("#define THREAD_FPR30 ",
+ offset("THREAD_FPR30",
struct task_struct, thread.fpu.fpr[30]);
- offset("#define THREAD_FPR31 ",
+ offset("THREAD_FPR31",
struct task_struct, thread.fpu.fpr[31]);
- offset("#define THREAD_FCR31 ",
+ offset("THREAD_FCR31",
struct task_struct, thread.fpu.fcr31);
linefeed;
}
void output_mm_defines(void)
{
- text("/* Size of struct page */");
- size("#define STRUCT_PAGE_SIZE ", struct page);
+ text("Size of struct page");
+ size("STRUCT_PAGE_SIZE", struct page);
linefeed;
- text("/* Linux mm_struct offsets. */");
- offset("#define MM_USERS ", struct mm_struct, mm_users);
- offset("#define MM_PGD ", struct mm_struct, pgd);
- offset("#define MM_CONTEXT ", struct mm_struct, context);
+ text("Linux mm_struct offsets.");
+ offset("MM_USERS", struct mm_struct, mm_users);
+ offset("MM_PGD", struct mm_struct, pgd);
+ offset("MM_CONTEXT", struct mm_struct, context);
linefeed;
- constant("#define _PAGE_SIZE ", PAGE_SIZE);
- constant("#define _PAGE_SHIFT ", PAGE_SHIFT);
+ constant("_PAGE_SIZE", PAGE_SIZE);
+ constant("_PAGE_SHIFT", PAGE_SHIFT);
linefeed;
- constant("#define _PGD_T_SIZE ", sizeof(pgd_t));
- constant("#define _PMD_T_SIZE ", sizeof(pmd_t));
- constant("#define _PTE_T_SIZE ", sizeof(pte_t));
+ constant("_PGD_T_SIZE", sizeof(pgd_t));
+ constant("_PMD_T_SIZE", sizeof(pmd_t));
+ constant("_PTE_T_SIZE", sizeof(pte_t));
linefeed;
- constant("#define _PGD_T_LOG2 ", PGD_T_LOG2);
- constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
- constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
+ constant("_PGD_T_LOG2", PGD_T_LOG2);
+ constant("_PMD_T_LOG2", PMD_T_LOG2);
+ constant("_PTE_T_LOG2", PTE_T_LOG2);
linefeed;
- constant("#define _PGD_ORDER ", PGD_ORDER);
- constant("#define _PMD_ORDER ", PMD_ORDER);
- constant("#define _PTE_ORDER ", PTE_ORDER);
+ constant("_PGD_ORDER", PGD_ORDER);
+ constant("_PMD_ORDER", PMD_ORDER);
+ constant("_PTE_ORDER", PTE_ORDER);
linefeed;
- constant("#define _PMD_SHIFT ", PMD_SHIFT);
- constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
+ constant("_PMD_SHIFT", PMD_SHIFT);
+ constant("_PGDIR_SHIFT", PGDIR_SHIFT);
linefeed;
- constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
- constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
- constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
+ constant("_PTRS_PER_PGD", PTRS_PER_PGD);
+ constant("_PTRS_PER_PMD", PTRS_PER_PMD);
+ constant("_PTRS_PER_PTE", PTRS_PER_PTE);
linefeed;
}
#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_ACX ", struct sigcontext, sc_acx);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
- offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
- offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
- offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
- offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
- offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
- offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
- offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
+ text("Linux sigcontext offsets.");
+ offset("SC_REGS", struct sigcontext, sc_regs);
+ offset("SC_FPREGS", struct sigcontext, sc_fpregs);
+ offset("SC_ACX", struct sigcontext, sc_acx);
+ offset("SC_MDHI", struct sigcontext, sc_mdhi);
+ offset("SC_MDLO", struct sigcontext, sc_mdlo);
+ offset("SC_PC", struct sigcontext, sc_pc);
+ offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr);
+ offset("SC_FPC_EIR", struct sigcontext, sc_fpc_eir);
+ offset("SC_HI1", struct sigcontext, sc_hi1);
+ offset("SC_LO1", struct sigcontext, sc_lo1);
+ offset("SC_HI2", struct sigcontext, sc_hi2);
+ offset("SC_LO2", struct sigcontext, sc_lo2);
+ offset("SC_HI3", struct sigcontext, sc_hi3);
+ offset("SC_LO3", struct sigcontext, sc_lo3);
linefeed;
}
#endif
@@ -270,13 +270,13 @@ void output_sc_defines(void)
#ifdef CONFIG_64BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
+ text("Linux sigcontext offsets.");
+ offset("SC_REGS", struct sigcontext, sc_regs);
+ offset("SC_FPREGS", struct sigcontext, sc_fpregs);
+ offset("SC_MDHI", struct sigcontext, sc_mdhi);
+ offset("SC_MDLO", struct sigcontext, sc_mdlo);
+ offset("SC_PC", struct sigcontext, sc_pc);
+ offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr);
linefeed;
}
#endif
@@ -284,56 +284,56 @@ void output_sc_defines(void)
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
{
- text("/* Linux 32-bit sigcontext offsets. */");
- offset("#define SC32_FPREGS ", struct sigcontext32, sc_fpregs);
- offset("#define SC32_FPC_CSR ", struct sigcontext32, sc_fpc_csr);
- offset("#define SC32_FPC_EIR ", struct sigcontext32, sc_fpc_eir);
+ text("Linux 32-bit sigcontext offsets.");
+ offset("SC32_FPREGS", struct sigcontext32, sc_fpregs);
+ offset("SC32_FPC_CSR", struct sigcontext32, sc_fpc_csr);
+ offset("SC32_FPC_EIR", struct sigcontext32, sc_fpc_eir);
linefeed;
}
#endif
void output_signal_defined(void)
{
- text("/* Linux signal numbers. */");
- constant("#define _SIGHUP ", SIGHUP);
- constant("#define _SIGINT ", SIGINT);
- constant("#define _SIGQUIT ", SIGQUIT);
- constant("#define _SIGILL ", SIGILL);
- constant("#define _SIGTRAP ", SIGTRAP);
- constant("#define _SIGIOT ", SIGIOT);
- constant("#define _SIGABRT ", SIGABRT);
- constant("#define _SIGEMT ", SIGEMT);
- constant("#define _SIGFPE ", SIGFPE);
- constant("#define _SIGKILL ", SIGKILL);
- constant("#define _SIGBUS ", SIGBUS);
- constant("#define _SIGSEGV ", SIGSEGV);
- constant("#define _SIGSYS ", SIGSYS);
- constant("#define _SIGPIPE ", SIGPIPE);
- constant("#define _SIGALRM ", SIGALRM);
- constant("#define _SIGTERM ", SIGTERM);
- constant("#define _SIGUSR1 ", SIGUSR1);
- constant("#define _SIGUSR2 ", SIGUSR2);
- constant("#define _SIGCHLD ", SIGCHLD);
- constant("#define _SIGPWR ", SIGPWR);
- constant("#define _SIGWINCH ", SIGWINCH);
- constant("#define _SIGURG ", SIGURG);
- constant("#define _SIGIO ", SIGIO);
- constant("#define _SIGSTOP ", SIGSTOP);
- constant("#define _SIGTSTP ", SIGTSTP);
- constant("#define _SIGCONT ", SIGCONT);
- constant("#define _SIGTTIN ", SIGTTIN);
- constant("#define _SIGTTOU ", SIGTTOU);
- constant("#define _SIGVTALRM ", SIGVTALRM);
- constant("#define _SIGPROF ", SIGPROF);
- constant("#define _SIGXCPU ", SIGXCPU);
- constant("#define _SIGXFSZ ", SIGXFSZ);
+ text("Linux signal numbers.");
+ constant("_SIGHUP", SIGHUP);
+ constant("_SIGINT", SIGINT);
+ constant("_SIGQUIT", SIGQUIT);
+ constant("_SIGILL", SIGILL);
+ constant("_SIGTRAP", SIGTRAP);
+ constant("_SIGIOT", SIGIOT);
+ constant("_SIGABRT", SIGABRT);
+ constant("_SIGEMT", SIGEMT);
+ constant("_SIGFPE", SIGFPE);
+ constant("_SIGKILL", SIGKILL);
+ constant("_SIGBUS", SIGBUS);
+ constant("_SIGSEGV", SIGSEGV);
+ constant("_SIGSYS", SIGSYS);
+ constant("_SIGPIPE", SIGPIPE);
+ constant("_SIGALRM", SIGALRM);
+ constant("_SIGTERM", SIGTERM);
+ constant("_SIGUSR1", SIGUSR1);
+ constant("_SIGUSR2", SIGUSR2);
+ constant("_SIGCHLD", SIGCHLD);
+ constant("_SIGPWR", SIGPWR);
+ constant("_SIGWINCH", SIGWINCH);
+ constant("_SIGURG", SIGURG);
+ constant("_SIGIO", SIGIO);
+ constant("_SIGSTOP", SIGSTOP);
+ constant("_SIGTSTP", SIGTSTP);
+ constant("_SIGCONT", SIGCONT);
+ constant("_SIGTTIN", SIGTTIN);
+ constant("_SIGTTOU", SIGTTOU);
+ constant("_SIGVTALRM", SIGVTALRM);
+ constant("_SIGPROF", SIGPROF);
+ constant("_SIGXCPU", SIGXCPU);
+ constant("_SIGXFSZ", SIGXFSZ);
linefeed;
}
void output_irq_cpustat_t_defines(void)
{
- text("/* Linux irq_cpustat_t offsets. */");
- offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
- size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t);
+ text("Linux irq_cpustat_t offsets.");
+ offset("IC_SOFTIRQ_PENDING", irq_cpustat_t, __softirq_pending);
+ size("IC_IRQ_CPUSTAT_T", irq_cpustat_t);
linefeed;
}
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index 76d4b5ed3fc0..c64995342ba8 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -1,7 +1,7 @@
/*
* init.c, Common initialization routines for NEC VR4100 series.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -53,6 +53,8 @@ void __init plat_time_init(void)
void __init plat_mem_setup(void)
{
iomem_resource_init();
+
+ vr41xx_siu_setup();
}
void __init prom_init(void)
diff --git a/arch/mips/vr41xx/common/siu.c b/arch/mips/vr41xx/common/siu.c
index b735f45b25f0..654dee6208be 100644
--- a/arch/mips/vr41xx/common/siu.c
+++ b/arch/mips/vr41xx/common/siu.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series SIU platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -118,3 +118,37 @@ err_free_device:
return retval;
}
device_initcall(vr41xx_siu_add);
+
+void __init vr41xx_siu_setup(void)
+{
+ struct uart_port port;
+ struct resource *res;
+ unsigned int *type;
+ int i;
+
+ switch (current_cpu_type()) {
+ case CPU_VR4111:
+ case CPU_VR4121:
+ type = siu_type1_ports;
+ res = siu_type1_resource;
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ type = siu_type2_ports;
+ res = siu_type2_resource;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
+ port.line = i;
+ port.type = type[i];
+ if (port.type == PORT_UNKNOWN)
+ break;
+ port.mapbase = res[i].start;
+ port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start);
+ vr41xx_siu_early_setup(&port);
+ }
+}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index eb80f5e33d7d..1f012843150f 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -603,15 +603,18 @@ void show_mem(void)
#ifdef CONFIG_DISCONTIGMEM
{
struct zonelist *zl;
- int i, j, k;
+ int i, j;
for (i = 0; i < npmem_ranges; i++) {
+ zl = node_zonelist(i);
for (j = 0; j < MAX_NR_ZONES; j++) {
- zl = NODE_DATA(i)->node_zonelists + j;
+ struct zoneref *z;
+ struct zone *zone;
printk("Zone list for zone %d on node %d: ", j, i);
- for (k = 0; zl->zones[k] != NULL; k++)
- printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
+ for_each_zone_zonelist(zone, z, zl, j)
+ printk("[%d/%s] ", zone_to_nid(zone),
+ zone->name);
printk("\n");
}
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 20f45a8b87e3..4e40c122bf26 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -803,3 +803,4 @@ config PPC_CLOCK
config PPC_LIB_RHEAP
bool
+source "arch/powerpc/kvm/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a86d8d853214..807a2dce6263 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -151,6 +151,9 @@ config BOOTX_TEXT
config PPC_EARLY_DEBUG
bool "Early debugging (dangerous)"
+ # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
+ # mark, which doesn't work with current 440 KVM.
+ depends on !KVM
help
Say Y to enable some early debugging facilities that may be available
for your processor/board combination. Those facilities are hacks
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e2ec4a91ccef..9dcdc036cdf7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -145,6 +145,7 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/platforms/
core-$(CONFIG_MATH_EMULATION) += arch/powerpc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
+core-$(CONFIG_KVM) += arch/powerpc/kvm/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 16c947b8a721..1f2f1e0a5571 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -45,6 +45,11 @@
reg = <0x00000000 0x20000000>; // 512M at 0x0
};
+ board-control@e8000000 {
+ compatible = "fsl,fpga-pixis";
+ reg = <0xe8000000 32>; // pixis at 0xe8000000
+ };
+
soc@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -104,6 +109,13 @@
interrupt-parent = <&mpic>;
};
+ display@2c000 {
+ compatible = "fsl,diu";
+ reg = <0x2c000 100>;
+ interrupts = <72 2>;
+ interrupt-parent = <&mpic>;
+ };
+
mpic: interrupt-controller@40000 {
clock-frequency = <0>;
interrupt-controller;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index adf1d09d726f..62134845af08 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,6 +23,9 @@
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -324,5 +327,30 @@ int main(void)
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+#ifdef CONFIG_KVM
+ DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+
+ DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
+ DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+ DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
+ DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+ DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+ DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
+ DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
+ DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
+ DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+ DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+ DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644
index 000000000000..f5d7a5eab96e
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <asm/mmu-44x.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
+#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
+
+static unsigned int kvmppc_tlb_44x_pos;
+
+static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
+{
+ /* Mask off reserved bits. */
+ attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+
+ if (!usermode) {
+ /* Guest is in supervisor mode, so we need to translate guest
+ * supervisor permissions into user permissions. */
+ attrib &= ~PPC44x_TLB_USER_PERM_MASK;
+ attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
+ }
+
+ /* Make sure host can always access this memory. */
+ attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
+
+ return attrib;
+}
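
The "<< 3" above works because each user permission bit sits exactly three bit positions above its supervisor counterpart in TLB word2 (values per asm/mmu-44x.h as recalled here; treat them as an assumption):

	/* SR (0x1) << 3 == UR (0x08)
	 * SW (0x2) << 3 == UW (0x10)
	 * SX (0x4) << 3 == UX (0x20)
	 */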
+
+/* Search the guest TLB for a matching entry. */
+int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
+ unsigned int as)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+ unsigned int tid;
+
+ if (eaddr < get_tlb_eaddr(tlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(tlbe))
+ continue;
+
+ tid = get_tlb_tid(tlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ if (!get_tlb_v(tlbe))
+ continue;
+
+ if (get_tlb_ts(tlbe) != as)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+{
+ return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
+}
+
+/* Must be called with mmap_sem locked for writing. */
+static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
+ unsigned int index)
+{
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
+ struct page *page = vcpu->arch.shadow_pages[index];
+
+ kunmap(vcpu->arch.shadow_pages[index]);
+
+ if (get_tlb_v(stlbe)) {
+ if (kvmppc_44x_tlbe_is_writable(stlbe))
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+ }
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
+ u32 flags)
+{
+ struct page *new_page;
+ struct tlbe *stlbe;
+ hpa_t hpaddr;
+ unsigned int victim;
+
+ /* Future optimization: don't overwrite the TLB entry containing the
+ * current PC (or stack?). */
+ victim = kvmppc_tlb_44x_pos++;
+ if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
+ kvmppc_tlb_44x_pos = 0;
+ stlbe = &vcpu->arch.shadow_tlb[victim];
+
+ /* Get reference to new page. */
+ down_write(&current->mm->mmap_sem);
+ new_page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_error_page(new_page)) {
+ printk(KERN_ERR "Couldn't get guest page!\n");
+ kvm_release_page_clean(new_page);
+ return;
+ }
+ hpaddr = page_to_phys(new_page);
+
+ /* Drop reference to old page. */
+ kvmppc_44x_shadow_release(vcpu, victim);
+ up_write(&current->mm->mmap_sem);
+
+ vcpu->arch.shadow_pages[victim] = new_page;
+
+ /* XXX Make sure (va, size) doesn't overlap any other
+ * entries. 440x6 user manual says the result would be
+ * "undefined." */
+
+ /* XXX what about AS? */
+
+ stlbe->tid = asid & 0xff;
+
+ /* Force TS=1 for all guest mappings. */
+ /* For now we hardcode 4KB mappings, but it will be important to
+ * use host large pages in the future. */
+ stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
+ | PPC44x_TLB_4K;
+
+ stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+ stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+ vcpu->arch.msr & MSR_PR);
+}
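
The word1 packing above splits a 36-bit host physical address into the 440's RPN and ERPN fields. A hypothetical hpaddr = 0x123456000 would encode as:

	/* word1 = (hpaddr & 0xfffffc00)	// RPN:  0x23456000
	 *	 | ((hpaddr >> 32) & 0xf);	// ERPN: bits 35:32 = 0x1
	 * => word1 == 0x23456001
	 */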
+
+void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
+{
+ unsigned int pid = asid & 0xff;
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+ unsigned int tid;
+
+ if (!get_tlb_v(stlbe))
+ continue;
+
+ if (eaddr < get_tlb_eaddr(stlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(stlbe))
+ continue;
+
+ tid = get_tlb_tid(stlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ kvmppc_44x_shadow_release(vcpu, i);
+ stlbe->word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
+
+/* Invalidate all mappings, so that when they fault back in they will get the
+ * proper permission bits. */
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ kvmppc_44x_shadow_release(vcpu, i);
+ vcpu->arch.shadow_tlb[i].word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644
index 000000000000..2ccd46b6f6b7
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -0,0 +1,91 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_POWERPC_TLB_H__
+#define __KVM_POWERPC_TLB_H__
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-44x.h>
+
+extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
+ unsigned int pid, unsigned int as);
+extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+
+/* TLB helper functions */
+static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 4) & 0xf;
+}
+
+static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+{
+ return tlbe->word0 & 0xfffffc00;
+}
+
+static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
+{
+ unsigned int pgsize = get_tlb_size(tlbe);
+ return 1 << 10 << (pgsize << 1);
+}
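
Decoding the SIZE field with the formula above, 1KB << (2 * SIZE), yields the 440 page-size ladder:

	/* SIZE 0 -> 1KB    SIZE 3 -> 64KB    SIZE 6 -> 4MB
	 * SIZE 1 -> 4KB    SIZE 4 -> 256KB   SIZE 7 -> 16MB
	 * SIZE 2 -> 16KB   SIZE 5 -> 1MB     ...
	 */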
+
+static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+{
+ return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+{
+ u64 word1 = tlbe->word1;
+ return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
+}
+
+static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+{
+ return tlbe->tid & 0xff;
+}
+
+static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 8) & 0x1;
+}
+
+static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 9) & 0x1;
+}
+
+static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.mmucr & 0xff;
+}
+
+static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mmucr >> 16) & 0x1;
+}
+
+static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
+{
+ unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
+
+ return get_tlb_raddr(tlbe) | (eaddr & pgmask);
+}
+
+#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644
index 000000000000..6b076010213b
--- /dev/null
+++ b/arch/powerpc/kvm/Kconfig
@@ -0,0 +1,42 @@
+#
+# KVM configuration
+#
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ depends on 44x && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ # We can only run on Book E hosts so far
+ select KVM_BOOKE_HOST
+ ---help---
+ Support hosting virtualized guest machines. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_BOOKE_HOST
+ bool "KVM host support for Book E PowerPC processors"
+ depends on KVM && 44x
+ ---help---
+ Provides host support for KVM on Book E PowerPC processors. Currently
+ this works on 440 processors only.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644
index 000000000000..d0d358d367ec
--- /dev/null
+++ b/arch/powerpc/kvm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+obj-$(CONFIG_KVM) += kvm.o
+
+AFLAGS_booke_interrupts.o := -I$(obj)
+
+kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
+obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644
index 000000000000..6d9884a6884a
--- /dev/null
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -0,0 +1,615 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "exits", VCPU_STAT(sum_exits) },
+ { "mmio", VCPU_STAT(mmio_exits) },
+ { "dcr", VCPU_STAT(dcr_exits) },
+ { "sig", VCPU_STAT(signal_exits) },
+ { "light", VCPU_STAT(light_exits) },
+ { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
+ { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
+ { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
+ { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
+ { "sysc", VCPU_STAT(syscall_exits) },
+ { "isi", VCPU_STAT(isi_exits) },
+ { "dsi", VCPU_STAT(dsi_exits) },
+ { "inst_emu", VCPU_STAT(emulated_inst_exits) },
+ { "dec", VCPU_STAT(dec_exits) },
+ { "ext_intr", VCPU_STAT(ext_intr_exits) },
+ { NULL }
+};
+
+static const u32 interrupt_msr_mask[16] = {
+ [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
+ [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
+ [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
+};
+
+const unsigned char exception_priority[] = {
+ [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
+ [BOOKE_INTERRUPT_INST_STORAGE] = 1,
+ [BOOKE_INTERRUPT_ALIGNMENT] = 2,
+ [BOOKE_INTERRUPT_PROGRAM] = 3,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
+ [BOOKE_INTERRUPT_SYSCALL] = 5,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
+ [BOOKE_INTERRUPT_DTLB_MISS] = 7,
+ [BOOKE_INTERRUPT_ITLB_MISS] = 8,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
+ [BOOKE_INTERRUPT_DEBUG] = 10,
+ [BOOKE_INTERRUPT_CRITICAL] = 11,
+ [BOOKE_INTERRUPT_WATCHDOG] = 12,
+ [BOOKE_INTERRUPT_EXTERNAL] = 13,
+ [BOOKE_INTERRUPT_FIT] = 14,
+ [BOOKE_INTERRUPT_DECREMENTER] = 15,
+};
+
+const unsigned char priority_exception[] = {
+ BOOKE_INTERRUPT_DATA_STORAGE,
+ BOOKE_INTERRUPT_INST_STORAGE,
+ BOOKE_INTERRUPT_ALIGNMENT,
+ BOOKE_INTERRUPT_PROGRAM,
+ BOOKE_INTERRUPT_FP_UNAVAIL,
+ BOOKE_INTERRUPT_SYSCALL,
+ BOOKE_INTERRUPT_AP_UNAVAIL,
+ BOOKE_INTERRUPT_DTLB_MISS,
+ BOOKE_INTERRUPT_ITLB_MISS,
+ BOOKE_INTERRUPT_MACHINE_CHECK,
+ BOOKE_INTERRUPT_DEBUG,
+ BOOKE_INTERRUPT_CRITICAL,
+ BOOKE_INTERRUPT_WATCHDOG,
+ BOOKE_INTERRUPT_EXTERNAL,
+ BOOKE_INTERRUPT_FIT,
+ BOOKE_INTERRUPT_DECREMENTER,
+};
+
+
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe;
+ int i;
+
+ printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+ printk("| %2s | %3s | %8s | %8s | %8s |\n",
+ "nr", "tid", "word0", "word1", "word2");
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.guest_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" G%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.shadow_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" S%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+}
+
+/* TODO: use vcpu_printf() */
+void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
+ printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
+ printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
+
+ printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ printk("gpr%02d: %08x %08x %08x %08x\n", i,
+ vcpu->arch.gpr[i],
+ vcpu->arch.gpr[i+1],
+ vcpu->arch.gpr[i+2],
+ vcpu->arch.gpr[i+3]);
+ }
+}
+
+/* Check if we are ready to deliver the interrupt */
+static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ int r;
+
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_CRITICAL:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ r = vcpu->arch.msr & MSR_ME;
+ break;
+ case BOOKE_INTERRUPT_EXTERNAL:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_DECREMENTER:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_FIT:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_WATCHDOG:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_DEBUG:
+ r = vcpu->arch.msr & MSR_DE;
+ break;
+ default:
+ r = 1;
+ }
+
+ return r;
+}
+
+static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_DECREMENTER:
+ vcpu->arch.tsr |= TSR_DIS;
+ break;
+ }
+
+ vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr1 = vcpu->arch.msr;
+ vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
+ kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
+}
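
For example, reading from the interrupt_msr_mask table above, delivering BOOKE_INTERRUPT_DATA_STORAGE masks the guest MSR with MSR_CE|MSR_ME|MSR_DE (a hypothetical starting MSR shown here):

	/* old msr = MSR_EE|MSR_PR|MSR_ME
	 * new msr = old & (MSR_CE|MSR_ME|MSR_DE) == MSR_ME
	 * so EE and PR are cleared on entry to the guest handler.
	 */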
+
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned int exception;
+ unsigned int priority;
+
+ priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+ while (priority <= BOOKE_MAX_INTERRUPT) {
+ exception = priority_exception[priority];
+ if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
+ kvmppc_clear_exception(vcpu, exception);
+ kvmppc_deliver_interrupt(vcpu, exception);
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+}
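
A concrete (hypothetical) pending state for the walk above: with both a DTLB miss (priority 7) and a decrementer (priority 15) set, find_first_bit() returns 7, so the DTLB miss is delivered first:

	/* pending = (1 << 7) | (1 << 15);
	 * priority_exception[7]  == BOOKE_INTERRUPT_DTLB_MISS   -> deliver now
	 * priority_exception[15] == BOOKE_INTERRUPT_DECREMENTER -> waits for
	 *				MSR[EE] on a later pass
	 */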
+
+static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+/**
+ * kvmppc_handle_exit
+ *
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ enum emulation_result er;
+ int r = RESUME_HOST;
+
+ local_irq_enable();
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ switch (exit_nr) {
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
+ kvmppc_dump_vcpu(vcpu);
+ r = RESUME_HOST;
+ break;
+
+ case BOOKE_INTERRUPT_EXTERNAL:
+ case BOOKE_INTERRUPT_DECREMENTER:
+ /* Since we switched IVPR back to the host's value, the host
+ * handled this interrupt the moment we enabled interrupts.
+ * Now we just offer it a chance to reschedule the guest. */
+
+ /* XXX At this point the TLB still holds our shadow TLB, so if
+ * we do reschedule the host will fault over it. Perhaps we
+ * should politely restore the host's entries to minimize
+ * misses before ceding control. */
+ if (need_resched())
+ cond_resched();
+ if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
+ vcpu->stat.dec_exits++;
+ else
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_PROGRAM:
+ if (vcpu->arch.msr & MSR_PR) {
+ /* Program traps generated by user-level software must be handled
+ * by the guest kernel. */
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if
+ * they were actually modified by emulation. */
+ vcpu->stat.emulated_inst_exits++;
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_DCR:
+ run->exit_reason = KVM_EXIT_DCR;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+ run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+ break;
+
+ case BOOKE_INTERRUPT_DATA_STORAGE:
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.dsi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_INST_STORAGE:
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.isi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_SYSCALL:
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.syscall_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_DTLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.fault_dear;
+ gfn_t gfn;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ vcpu->stat.dtlb_real_miss_exits++;
+ r = RESUME_GUEST;
+ break;
+ }
+
+ vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
+ gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't, and it is RAM. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ vcpu->stat.dtlb_virt_miss_exits++;
+ r = RESUME_GUEST;
+ } else {
+ /* Guest has mapped and accessed a page which is not
+ * actually RAM. */
+ r = kvmppc_emulate_mmio(run, vcpu);
+ }
+
+ break;
+ }
+
+ case BOOKE_INTERRUPT_ITLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.pc;
+ gfn_t gfn;
+
+ r = RESUME_GUEST;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.itlb_real_miss_exits++;
+ break;
+ }
+
+ vcpu->stat.itlb_virt_miss_exits++;
+
+ gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ } else {
+ /* Guest mapped and leaped at non-RAM! */
+ kvmppc_queue_exception(vcpu,
+ BOOKE_INTERRUPT_MACHINE_CHECK);
+ }
+
+ break;
+ }
+
+ default:
+ printk(KERN_EMERG "exit_nr %d\n", exit_nr);
+ BUG();
+ }
+
+ local_irq_disable();
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ /* Do some exit accounting. */
+ vcpu->stat.sum_exits++;
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+
+ vcpu->stat.signal_exits++;
+ } else {
+ vcpu->stat.light_exits++;
+ }
+ } else {
+ switch (run->exit_reason) {
+ case KVM_EXIT_MMIO:
+ vcpu->stat.mmio_exits++;
+ break;
+ case KVM_EXIT_DCR:
+ vcpu->stat.dcr_exits++;
+ break;
+ case KVM_EXIT_INTR:
+ vcpu->stat.signal_exits++;
+ break;
+ }
+ }
+
+ return r;
+}
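+
+/* The return-value encoding noted above ("errcode<<2 | RESUME_FLAG_HOST |
+ * RESUME_FLAG_NV") can be made concrete with two illustrative helpers; the
+ * arithmetic shift mirrors the "srawi r3, r3, 2" done in booke_interrupts.S,
+ * so a negative errno such as -EINTR survives the round trip. Sketch only,
+ * assuming RESUME_FLAG_NV and RESUME_FLAG_HOST occupy the low two bits:
+ *
+ *	static inline int kvmppc_pack_resume(int errcode, int flags)
+ *	{
+ *		return (errcode << 2) | flags;
+ *	}
+ *
+ *	static inline int kvmppc_resume_errcode(int r)
+ *	{
+ *		return r >> 2;	// arithmetic shift: -EINTR comes back intact
+ *	}
+ */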
+
+/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
+
+ tlbe->tid = 0;
+ tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
+ tlbe->word1 = 0;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
+
+ tlbe++;
+ tlbe->tid = 0;
+ tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
+ tlbe->word1 = 0xef600000;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
+ | PPC44x_TLB_I | PPC44x_TLB_G;
+
+ vcpu->arch.pc = 0;
+ vcpu->arch.msr = 0;
+ vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+
+ /* Eye-catching number so we know if the guest takes an interrupt
+ * before it's programmed its own IVPR. */
+ vcpu->arch.ivpr = 0x55550000;
+
+ /* Since the guest can directly access the timebase, it must know the
+ * real timebase frequency. Accordingly, it must see the state of
+ * CCR1[TCS]. */
+ vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ regs->pc = vcpu->arch.pc;
+ regs->cr = vcpu->arch.cr;
+ regs->ctr = vcpu->arch.ctr;
+ regs->lr = vcpu->arch.lr;
+ regs->xer = vcpu->arch.xer;
+ regs->msr = vcpu->arch.msr;
+ regs->srr0 = vcpu->arch.srr0;
+ regs->srr1 = vcpu->arch.srr1;
+ regs->pid = vcpu->arch.pid;
+ regs->sprg0 = vcpu->arch.sprg0;
+ regs->sprg1 = vcpu->arch.sprg1;
+ regs->sprg2 = vcpu->arch.sprg2;
+ regs->sprg3 = vcpu->arch.sprg3;
+ regs->sprg4 = vcpu->arch.sprg4;
+ regs->sprg5 = vcpu->arch.sprg5;
+ regs->sprg6 = vcpu->arch.sprg6;
+ regs->sprg7 = vcpu->arch.sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ regs->gpr[i] = vcpu->arch.gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu->arch.pc = regs->pc;
+ vcpu->arch.cr = regs->cr;
+ vcpu->arch.ctr = regs->ctr;
+ vcpu->arch.lr = regs->lr;
+ vcpu->arch.xer = regs->xer;
+ vcpu->arch.msr = regs->msr;
+ vcpu->arch.srr0 = regs->srr0;
+ vcpu->arch.srr1 = regs->srr1;
+ vcpu->arch.sprg0 = regs->sprg0;
+ vcpu->arch.sprg1 = regs->sprg1;
+ vcpu->arch.sprg2 = regs->sprg2;
+ vcpu->arch.sprg3 = regs->sprg3;
+ vcpu->arch.sprg4 = regs->sprg4;
+ vcpu->arch.sprg5 = regs->sprg5;
+ vcpu->arch.sprg6 = regs->sprg6;
+ vcpu->arch.sprg7 = regs->sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
+ vcpu->arch.gpr[i] = regs->gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ struct tlbe *gtlbe;
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
+ if (index == -1) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ gtlbe = &vcpu->arch.guest_tlb[index];
+
+ tr->physical_address = tlb_xlate(gtlbe, eaddr);
+ /* XXX what does "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
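+
+/* For reference, the inverse packing that userspace would perform before
+ * calling KVM_TRANSLATE, matching the AS|PID|EADDR layout decoded above.
+ * This is a hypothetical helper, not part of this patch:
+ *
+ *	static uint64_t pack_linear_address(uint8_t as, uint8_t pid,
+ *					    uint32_t eaddr)
+ *	{
+ *		return ((uint64_t)(as & 0x1) << 40) |
+ *		       ((uint64_t)pid << 32) | eaddr;
+ *	}
+ */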
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644
index 000000000000..b480341bc31e
--- /dev/null
+++ b/arch/powerpc/kvm/booke_host.c
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_ppc.h>
+
+unsigned long kvmppc_booke_handlers;
+
+static int kvmppc_booke_init(void)
+{
+ unsigned long ivor[16];
+ unsigned long max_ivor = 0;
+ int i;
+
+ /* We install our own exception handlers by hijacking IVPR. IVPR holds
+ * only the upper 16 bits of the vector base, so the handlers must live
+ * in a 64KB-aligned block; hence a 64KB allocation. */
+ kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ VCPU_SIZE_ORDER);
+ if (!kvmppc_booke_handlers)
+ return -ENOMEM;
+
+ /* XXX make sure our handlers are smaller than Linux's */
+
+ /* Copy our interrupt handlers to match host IVORs. That way we don't
+ * have to swap the IVORs on every guest/host transition. */
+ ivor[0] = mfspr(SPRN_IVOR0);
+ ivor[1] = mfspr(SPRN_IVOR1);
+ ivor[2] = mfspr(SPRN_IVOR2);
+ ivor[3] = mfspr(SPRN_IVOR3);
+ ivor[4] = mfspr(SPRN_IVOR4);
+ ivor[5] = mfspr(SPRN_IVOR5);
+ ivor[6] = mfspr(SPRN_IVOR6);
+ ivor[7] = mfspr(SPRN_IVOR7);
+ ivor[8] = mfspr(SPRN_IVOR8);
+ ivor[9] = mfspr(SPRN_IVOR9);
+ ivor[10] = mfspr(SPRN_IVOR10);
+ ivor[11] = mfspr(SPRN_IVOR11);
+ ivor[12] = mfspr(SPRN_IVOR12);
+ ivor[13] = mfspr(SPRN_IVOR13);
+ ivor[14] = mfspr(SPRN_IVOR14);
+ ivor[15] = mfspr(SPRN_IVOR15);
+
+ for (i = 0; i < 16; i++) {
+ if (ivor[i] > max_ivor)
+ max_ivor = ivor[i];
+
+ memcpy((void *)kvmppc_booke_handlers + ivor[i],
+ kvmppc_handlers_start + i * kvmppc_handler_len,
+ kvmppc_handler_len);
+ }
+ flush_icache_range(kvmppc_booke_handlers,
+ kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvmppc_booke_exit(void)
+{
+ free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
+ kvm_exit();
+}
+
+module_init(kvmppc_booke_init)
+module_exit(kvmppc_booke_exit)
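
Why copying the stubs at the host IVOR offsets works: on Book E the CPU composes a vector address from IVPR (the upper bits of the base) and the per-exception IVOR offset, exactly as the delivery code above computes `ivpr | ivor[interrupt]`. A sketch of the effective address each copied stub ends up at, assuming the 64KB-aligned block allocated above:

    /* Sketch: where the copied stub for a given exception lives. */
    static inline unsigned long kvmppc_stub_addr(unsigned long ivor_i)
    {
            /* base | offset; base is 64KB-aligned, so OR == ADD here */
            return kvmppc_booke_handlers + ivor_i;
    }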
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644
index 000000000000..3b653b5309b8
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+
+#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
+
+/* The host stack layout: */
+#define HOST_R1 0 /* Implied by stwu. */
+#define HOST_CALLEE_LR 4
+#define HOST_RUN 8
+/* r2 is special: it holds 'current', and it is made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option. */
+#define HOST_R2 12
+#define HOST_NV_GPRS 16
+#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
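
Worked through, these macros give: HOST_NV_GPR(14) = 16 and HOST_NV_GPR(31) = 16 + 17*4 = 84, so HOST_MIN_STACK_SIZE = 88; rounding up to a 16-byte boundary yields HOST_STACK_SIZE = 96, and HOST_STACK_LR = 100 lands in the caller's frame as the comment says.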
+
+#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+.macro KVM_HANDLER ivor_nr
+_GLOBAL(kvmppc_handler_\ivor_nr)
+ /* Get pointer to vcpu and record exit number. */
+ mtspr SPRN_SPRG0, r4
+ mfspr r4, SPRN_SPRG1
+ stw r5, VCPU_GPR(r5)(r4)
+ stw r6, VCPU_GPR(r6)(r4)
+ mfctr r5
+ lis r6, kvmppc_resume_host@h
+ stw r5, VCPU_CTR(r4)
+ li r5, \ivor_nr
+ ori r6, r6, kvmppc_resume_host@l
+ mtctr r6
+ bctr
+.endm
+
+_GLOBAL(kvmppc_handlers_start)
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
+KVM_HANDLER BOOKE_INTERRUPT_FIT
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG
+
+_GLOBAL(kvmppc_handler_len)
+ .long kvmppc_handler_1 - kvmppc_handler_0
+
+
+/* Registers:
+ * SPRG0: guest r4
+ * r4: vcpu pointer
+ * r5: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+ stw r3, VCPU_GPR(r3)(r4)
+ mfcr r3
+ stw r3, VCPU_CR(r4)
+ stw r7, VCPU_GPR(r7)(r4)
+ stw r8, VCPU_GPR(r8)(r4)
+ stw r9, VCPU_GPR(r9)(r4)
+
+ li r6, 1
+ slw r6, r6, r5
+
+ /* Save the faulting instruction and all GPRs for emulation. */
+ andi. r7, r6, NEED_INST_MASK
+ beq ..skip_inst_copy
+ mfspr r9, SPRN_SRR0
+ mfmsr r8
+ ori r7, r8, MSR_DS
+ mtmsr r7
+ isync
+ lwz r9, 0(r9)
+ mtmsr r8
+ isync
+ stw r9, VCPU_LAST_INST(r4)
+
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+..skip_inst_copy:
+
+ /* Also grab DEAR and ESR before the host can clobber them. */
+
+ andi. r7, r6, NEED_DEAR_MASK
+ beq ..skip_dear
+ mfspr r9, SPRN_DEAR
+ stw r9, VCPU_FAULT_DEAR(r4)
+..skip_dear:
+
+ andi. r7, r6, NEED_ESR_MASK
+ beq ..skip_esr
+ mfspr r9, SPRN_ESR
+ stw r9, VCPU_FAULT_ESR(r4)
+..skip_esr:
+
+ /* Save remaining volatile guest register state to vcpu. */
+ stw r0, VCPU_GPR(r0)(r4)
+ stw r1, VCPU_GPR(r1)(r4)
+ stw r2, VCPU_GPR(r2)(r4)
+ stw r10, VCPU_GPR(r10)(r4)
+ stw r11, VCPU_GPR(r11)(r4)
+ stw r12, VCPU_GPR(r12)(r4)
+ stw r13, VCPU_GPR(r13)(r4)
+ stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+ mflr r3
+ stw r3, VCPU_LR(r4)
+ mfxer r3
+ stw r3, VCPU_XER(r4)
+ mfspr r3, SPRN_SPRG0
+ stw r3, VCPU_GPR(r4)(r4)
+ mfspr r3, SPRN_SRR0
+ stw r3, VCPU_PC(r4)
+
+ /* Restore host stack pointer and PID before IVPR, since the host
+ * exception handlers use them. */
+ lwz r1, VCPU_HOST_STACK(r4)
+ lwz r3, VCPU_HOST_PID(r4)
+ mtspr SPRN_PID, r3
+
+ /* Restore host IVPR before re-enabling interrupts. We cheat and know
+ * that Linux IVPR is always 0xc0000000. */
+ lis r3, 0xc000
+ mtspr SPRN_IVPR, r3
+
+ /* Switch to kernel stack and jump to handler. */
+ LOAD_REG_ADDR(r3, kvmppc_handle_exit)
+ mtctr r3
+ lwz r3, HOST_RUN(r1)
+ lwz r2, HOST_R2(r1)
+ mr r14, r4 /* Save vcpu pointer. */
+
+ bctrl /* kvmppc_handle_exit() */
+
+ /* Restore vcpu pointer and the nonvolatiles we used. */
+ mr r4, r14
+ lwz r14, VCPU_GPR(r14)(r4)
+
+ /* Sometimes instruction emulation must restore complete GPR state. */
+ andi. r5, r3, RESUME_FLAG_NV
+ beq ..skip_nv_load
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+..skip_nv_load:
+
+ /* Should we return to the guest? */
+ andi. r5, r3, RESUME_FLAG_HOST
+ beq lightweight_exit
+
+ srawi r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+ /* Not returning to guest. */
+
+ /* We already saved guest volatile register state; now save the
+ * non-volatiles. */
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+
+ /* Load host non-volatile register state from host stack. */
+ lwz r14, HOST_NV_GPR(r14)(r1)
+ lwz r15, HOST_NV_GPR(r15)(r1)
+ lwz r16, HOST_NV_GPR(r16)(r1)
+ lwz r17, HOST_NV_GPR(r17)(r1)
+ lwz r18, HOST_NV_GPR(r18)(r1)
+ lwz r19, HOST_NV_GPR(r19)(r1)
+ lwz r20, HOST_NV_GPR(r20)(r1)
+ lwz r21, HOST_NV_GPR(r21)(r1)
+ lwz r22, HOST_NV_GPR(r22)(r1)
+ lwz r23, HOST_NV_GPR(r23)(r1)
+ lwz r24, HOST_NV_GPR(r24)(r1)
+ lwz r25, HOST_NV_GPR(r25)(r1)
+ lwz r26, HOST_NV_GPR(r26)(r1)
+ lwz r27, HOST_NV_GPR(r27)(r1)
+ lwz r28, HOST_NV_GPR(r28)(r1)
+ lwz r29, HOST_NV_GPR(r29)(r1)
+ lwz r30, HOST_NV_GPR(r30)(r1)
+ lwz r31, HOST_NV_GPR(r31)(r1)
+
+ /* Return to kvm_vcpu_run(). */
+ lwz r4, HOST_STACK_LR(r1)
+ addi r1, r1, HOST_STACK_SIZE
+ mtlr r4
+ /* r3 still contains the return code from kvmppc_handle_exit(). */
+ blr
+
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+ stwu r1, -HOST_STACK_SIZE(r1)
+ stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+ /* Save host state to stack. */
+ stw r3, HOST_RUN(r1)
+ mflr r3
+ stw r3, HOST_STACK_LR(r1)
+
+ /* Save host non-volatile register state to stack. */
+ stw r14, HOST_NV_GPR(r14)(r1)
+ stw r15, HOST_NV_GPR(r15)(r1)
+ stw r16, HOST_NV_GPR(r16)(r1)
+ stw r17, HOST_NV_GPR(r17)(r1)
+ stw r18, HOST_NV_GPR(r18)(r1)
+ stw r19, HOST_NV_GPR(r19)(r1)
+ stw r20, HOST_NV_GPR(r20)(r1)
+ stw r21, HOST_NV_GPR(r21)(r1)
+ stw r22, HOST_NV_GPR(r22)(r1)
+ stw r23, HOST_NV_GPR(r23)(r1)
+ stw r24, HOST_NV_GPR(r24)(r1)
+ stw r25, HOST_NV_GPR(r25)(r1)
+ stw r26, HOST_NV_GPR(r26)(r1)
+ stw r27, HOST_NV_GPR(r27)(r1)
+ stw r28, HOST_NV_GPR(r28)(r1)
+ stw r29, HOST_NV_GPR(r29)(r1)
+ stw r30, HOST_NV_GPR(r30)(r1)
+ stw r31, HOST_NV_GPR(r31)(r1)
+
+ /* Load guest non-volatiles. */
+ lwz r14, VCPU_GPR(r14)(r4)
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+
+lightweight_exit:
+ stw r2, HOST_R2(r1)
+
+ mfspr r3, SPRN_PID
+ stw r3, VCPU_HOST_PID(r4)
+ lwz r3, VCPU_PID(r4)
+ mtspr SPRN_PID, r3
+
+ /* Prevent all TLB updates. */
+ mfmsr r5
+ lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
+ ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+ andc r6, r5, r6
+ mtmsr r6
+
+ /* Save the host's non-pinned TLB mappings, and load the guest mappings
+ * over them. Leave the host's "pinned" kernel mappings in place. */
+ /* XXX optimization: use generation count to avoid swapping unmodified
+ * entries. */
+ mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
+ lis r8, tlb_44x_hwater@ha
+ lwz r8, tlb_44x_hwater@l(r8)
+ addi r3, r4, VCPU_HOST_TLB - 4
+ addi r9, r4, VCPU_SHADOW_TLB - 4
+ li r6, 0
+1:
+ /* Save host entry. */
+ tlbre r7, r6, PPC44x_TLB_PAGEID
+ mfspr r5, SPRN_MMUCR
+ stwu r5, 4(r3)
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_XLAT
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_ATTRIB
+ stwu r7, 4(r3)
+ /* Load guest entry. */
+ lwzu r7, 4(r9)
+ mtspr SPRN_MMUCR, r7
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_PAGEID
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_XLAT
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_ATTRIB
+ /* Increment index. */
+ addi r6, r6, 1
+ cmpw r6, r8
+ blt 1b
+ mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
+
+ iccci 0, 0 /* XXX hack */
+
+ /* Load some guest volatiles. */
+ lwz r0, VCPU_GPR(r0)(r4)
+ lwz r2, VCPU_GPR(r2)(r4)
+ lwz r9, VCPU_GPR(r9)(r4)
+ lwz r10, VCPU_GPR(r10)(r4)
+ lwz r11, VCPU_GPR(r11)(r4)
+ lwz r12, VCPU_GPR(r12)(r4)
+ lwz r13, VCPU_GPR(r13)(r4)
+ lwz r3, VCPU_LR(r4)
+ mtlr r3
+ lwz r3, VCPU_XER(r4)
+ mtxer r3
+
+ /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
+ * so how do we make sure vcpu won't fault? */
+ lis r8, kvmppc_booke_handlers@ha
+ lwz r8, kvmppc_booke_handlers@l(r8)
+ mtspr SPRN_IVPR, r8
+
+ /* Save vcpu pointer for the exception handlers. */
+ mtspr SPRN_SPRG1, r4
+
+ /* Can't switch the stack pointer until after IVPR is switched,
+ * because host interrupt handlers would get confused. */
+ lwz r1, VCPU_GPR(r1)(r4)
+
+ /* XXX handle USPRG0 */
+ /* Host interrupt handlers may have clobbered these guest-readable
+ * SPRGs, so we need to reload them here with the guest's values. */
+ lwz r3, VCPU_SPRG4(r4)
+ mtspr SPRN_SPRG4, r3
+ lwz r3, VCPU_SPRG5(r4)
+ mtspr SPRN_SPRG5, r3
+ lwz r3, VCPU_SPRG6(r4)
+ mtspr SPRN_SPRG6, r3
+ lwz r3, VCPU_SPRG7(r4)
+ mtspr SPRN_SPRG7, r3
+
+ /* Finish loading guest volatiles and jump to guest. */
+ lwz r3, VCPU_CTR(r4)
+ mtctr r3
+ lwz r3, VCPU_CR(r4)
+ mtcr r3
+ lwz r5, VCPU_GPR(r5)(r4)
+ lwz r6, VCPU_GPR(r6)(r4)
+ lwz r7, VCPU_GPR(r7)(r4)
+ lwz r8, VCPU_GPR(r8)(r4)
+ lwz r3, VCPU_PC(r4)
+ mtsrr0 r3
+ lwz r3, VCPU_MSR(r4)
+ oris r3, r3, KVMPPC_MSR_MASK@h
+ ori r3, r3, KVMPPC_MSR_MASK@l
+ mtsrr1 r3
+ lwz r3, VCPU_GPR(r3)(r4)
+ lwz r4, VCPU_GPR(r4)(r4)
+ rfi
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
new file mode 100644
index 000000000000..a03fe0c80698
--- /dev/null
+++ b/arch/powerpc/kvm/emulate.c
@@ -0,0 +1,760 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/time.h>
+#include <asm/byteorder.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+/* Instruction decoding */
+static inline unsigned int get_op(u32 inst)
+{
+ return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+ return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+ return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+ return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+ return inst & 0xffff;
+}
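+
+/* As a quick sanity check of these accessors, decoding the word for
+ * "mfmsr r3" (0x7c6000a6 -- an illustrative encoding, worth verifying
+ * against the ISA tables) gives:
+ *
+ *	u32 inst = 0x7c6000a6;	// mfmsr r3
+ *	// get_op(inst)  == 31  -- X-form major opcode
+ *	// get_xop(inst) == 83  -- mfmsr, handled below
+ *	// get_rt(inst)  == 3   -- destination register
+ */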
+
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct tlbe *tlbe)
+{
+ gpa_t gpa;
+
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+ return 0;
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
+{
+ u64 eaddr;
+ u64 raddr;
+ u64 asid;
+ u32 flags;
+ struct tlbe *tlbe;
+ unsigned int ra;
+ unsigned int rs;
+ unsigned int ws;
+ unsigned int index;
+
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ ws = get_ws(inst);
+
+ index = vcpu->arch.gpr[ra];
+ if (index >= PPC44x_TLB_SIZE) {
+ printk("%s: index %d\n", __func__, index);
+ kvmppc_dump_vcpu(vcpu);
+ return EMULATE_FAIL;
+ }
+
+ tlbe = &vcpu->arch.guest_tlb[index];
+
+ /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
+ if (tlbe->word0 & PPC44x_TLB_VALID) {
+ eaddr = get_tlb_eaddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ kvmppc_mmu_invalidate(vcpu, eaddr, asid);
+ }
+
+ switch (ws) {
+ case PPC44x_TLB_PAGEID:
+ tlbe->tid = vcpu->arch.mmucr & 0xff;
+ tlbe->word0 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_XLAT:
+ tlbe->word1 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_ATTRIB:
+ tlbe->word2 = vcpu->arch.gpr[rs];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ if (tlbe_is_host_safe(vcpu, tlbe)) {
+ eaddr = get_tlb_eaddr(tlbe);
+ raddr = get_tlb_raddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ flags = tlbe->word2 & 0xffff;
+
+ /* Create a 4KB mapping on the host. If the guest wanted a
+ * large page, only the first 4KB is mapped here and the rest
+ * are mapped on the fly. */
+ kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+ }
+
+ return EMULATE_DONE;
+}
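+
+/* The "(word0 & PPC44x_TLB_TS) | tid" expression appears twice above; a
+ * small helper makes the intent explicit (sketch only -- the patch
+ * open-codes it):
+ *
+ *	static inline u64 kvmppc_tlbe_asid(const struct tlbe *tlbe)
+ *	{
+ *		// the shadow ASID is the TS space bit ORed with the TID
+ *		return (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ *	}
+ */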
+
+static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.tcr & TCR_DIE) {
+ /* The decrementer ticks at the same rate as the timebase, so
+ * that's how we convert the guest DEC value to the number of
+ * host ticks. */
+ unsigned long nr_jiffies;
+
+ nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
+ mod_timer(&vcpu->arch.dec_timer,
+ get_jiffies_64() + nr_jiffies);
+ } else {
+ del_timer(&vcpu->arch.dec_timer);
+ }
+}
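+
+/* To make the conversion concrete: with an assumed 400 MHz timebase and
+ * HZ == 250, tb_ticks_per_jiffy is 400000000 / 250 = 1600000, so a guest
+ * DEC value of 4800000 programs the host timer 3 jiffies (12 ms) out. */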
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+/* XXX to do:
+ * lhax
+ * lhaux
+ * lswx
+ * lswi
+ * stswx
+ * stswi
+ * lha
+ * lhau
+ * lmw
+ * stmw
+ *
+ * XXX is_bigendian should depend on MMU mapping or MSR[LE]
+ */
+int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ u32 inst = vcpu->arch.last_inst;
+ u32 ea;
+ int ra;
+ int rb;
+ int rc;
+ int rs;
+ int rt;
+ int sprn;
+ int dcrn;
+ enum emulation_result emulated = EMULATE_DONE;
+ int advance = 1;
+
+ switch (get_op(inst)) {
+ case 3: /* trap */
+ printk("trap!\n");
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ advance = 0;
+ break;
+
+ case 19:
+ switch (get_xop(inst)) {
+ case 50: /* rfi */
+ kvmppc_emul_rfi(vcpu);
+ advance = 0;
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 31:
+ switch (get_xop(inst)) {
+
+ case 83: /* mfmsr */
+ rt = get_rt(inst);
+ vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ break;
+
+ case 87: /* lbzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 131: /* wrtee */
+ rs = get_rs(inst);
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (vcpu->arch.gpr[rs] & MSR_EE);
+ break;
+
+ case 146: /* mtmsr */
+ rs = get_rs(inst);
+ kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ break;
+
+ case 163: /* wrteei */
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (inst & MSR_EE);
+ break;
+
+ case 215: /* stbx */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 247: /* stbux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 279: /* lhzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 311: /* lhzux */
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 323: /* mfdcr */
+ dcrn = get_dcrn(inst);
+ rt = get_rt(inst);
+
+ /* The guest may access CPR0 registers to determine the timebase
+ * frequency, and it must know the real host frequency because it
+ * can directly access the timebase registers.
+ *
+ * It would be possible to emulate those accesses in userspace,
+ * but userspace can really only figure out the end frequency.
+ * We could decompose that into the factors that compute it, but
+ * that's tricky math, and it's easier to just report the real
+ * CPR0 values.
+ */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ break;
+ case DCRN_CPR0_CONFIG_DATA:
+ local_irq_disable();
+ mtdcr(DCRN_CPR0_CONFIG_ADDR,
+ vcpu->arch.cpr0_cfgaddr);
+ vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ local_irq_enable();
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 339: /* mfspr */
+ sprn = get_sprn(inst);
+ rt = get_rt(inst);
+
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+ case SPRN_SRR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+ case SPRN_MMUCR:
+ vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ case SPRN_PID:
+ vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ case SPRN_IVPR:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ case SPRN_CCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ case SPRN_CCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+ case SPRN_PVR:
+ vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ case SPRN_DEAR:
+ vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ case SPRN_ESR:
+ vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ case SPRN_DBCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ case SPRN_DBCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+ /* Note: mftb and TBRL/TBWL are user-accessible, so
+ * the guest can always access the real TB anyway.
+ * In fact, we probably will never see these traps. */
+ case SPRN_TBWL:
+ vcpu->arch.gpr[rt] = mftbl(); break;
+ case SPRN_TBWU:
+ vcpu->arch.gpr[rt] = mftbu(); break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+ case SPRN_SPRG1:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+ case SPRN_SPRG2:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+ case SPRN_SPRG3:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+ /* Note: SPRG4-7 are user-readable, so we don't get
+ * a trap. */
+
+ case SPRN_IVOR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
+
+ default:
+ printk("mfspr: unknown spr %x\n", sprn);
+ vcpu->arch.gpr[rt] = 0;
+ break;
+ }
+ break;
+
+ case 407: /* sthx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 439: /* sthux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 451: /* mtdcr */
+ dcrn = get_dcrn(inst);
+ rs = get_rs(inst);
+
+ /* emulate some access in kernel */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 467: /* mtspr */
+ sprn = get_sprn(inst);
+ rs = get_rs(inst);
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SRR1:
+ vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_MMUCR:
+ vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ case SPRN_PID:
+ vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR0:
+ vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR1:
+ vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DEAR:
+ vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ case SPRN_ESR:
+ vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR0:
+ vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR1:
+ vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+
+ /* XXX We need to context-switch the timebase for
+ * watchdog and FIT. */
+ case SPRN_TBWL: break;
+ case SPRN_TBWU: break;
+
+ case SPRN_DEC:
+ vcpu->arch.dec = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_TSR:
+ vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+
+ case SPRN_TCR:
+ vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG1:
+ vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG2:
+ vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG3:
+ vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+
+ /* Note: SPRG4-7 are user-readable. These values are
+ * loaded into the real SPRGs when resuming the
+ * guest. */
+ case SPRN_SPRG4:
+ vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG5:
+ vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG6:
+ vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG7:
+ vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+ case SPRN_IVPR:
+ vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR0:
+ vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
+
+ default:
+ printk("mtspr: unknown spr %x\n", sprn);
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 470: /* dcbi */
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+ * emulated DMA either goes through the dcache as
+ * normal writes, or the host kernel has handled dcache
+ * coherence. */
+ break;
+
+ case 534: /* lwbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
+ break;
+
+ case 566: /* tlbsync */
+ break;
+
+ case 662: /* stwbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 4, 0);
+ break;
+
+ case 978: /* tlbwe */
+ emulated = kvmppc_emul_tlbwe(vcpu, inst);
+ break;
+
+ case 914: { /* tlbsx */
+ int index;
+ unsigned int as = get_mmucr_sts(vcpu);
+ unsigned int pid = get_mmucr_stid(vcpu);
+
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+ rc = get_rc(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+ if (rc) {
+ if (index < 0)
+ vcpu->arch.cr &= ~0x20000000;
+ else
+ vcpu->arch.cr |= 0x20000000;
+ }
+ vcpu->arch.gpr[rt] = index;
+
+ }
+ break;
+
+ case 790: /* lhbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
+ break;
+
+ case 918: /* sthbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 0);
+ break;
+
+ case 966: /* iccci */
+ break;
+
+ default:
+ printk("unknown: op %d xop %d\n", get_op(inst),
+ get_xop(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 32: /* lwz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ break;
+
+ case 33: /* lwzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 34: /* lbz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 35: /* lbzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 36: /* stw */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ break;
+
+ case 37: /* stwu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 38: /* stb */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 39: /* stbu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 40: /* lhz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 41: /* lhzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 44: /* sth */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 45: /* sthu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ default:
+ printk("unknown op %d\n", get_op(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+
+ if (advance)
+ vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+
+ return emulated;
+}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
new file mode 100644
index 000000000000..bad40bd2d3ac
--- /dev/null
+++ b/arch/powerpc/kvm/powerpc.c
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+{
+ /* XXX implement me */
+ return 0;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+ return 1;
+}
+
+
+int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ *(int *)rtn = r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm;
+
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ return ERR_PTR(-ENOMEM);
+
+ return kvm;
+}
+
+static void kvmppc_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
+ kvm->vcpus[i] = NULL;
+ }
+ }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvmppc_free_vcpus(kvm);
+ kvm_free_physmem(kvm);
+ kfree(kvm);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_USER_MEMORY:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ return vcpu;
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
+
+ return test_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static void kvmppc_decrementer_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
+ (unsigned long)vcpu);
+
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -ENOTSUPP;
+}
+
+static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ *gpr = run->dcr.data;
+}
+
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+
+ if (run->mmio.len > sizeof(*gpr)) {
+ printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
+ return;
+ }
+
+ if (vcpu->arch.mmio_is_bigendian) {
+ switch (run->mmio.len) {
+ case 4: *gpr = *(u32 *)run->mmio.data; break;
+ case 2: *gpr = *(u16 *)run->mmio.data; break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ } else {
+ /* Convert BE data from userland back to LE. */
+ switch (run->mmio.len) {
+ case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
+ case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ }
+}
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+ bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+
+ return EMULATE_DO_MMIO;
+}
+
+int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian)
+{
+ void *data = run->mmio.data;
+
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+ bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+
+ /* Store the value at the lowest bytes in 'data'. */
+ if (is_bigendian) {
+ switch (bytes) {
+ case 4: *(u32 *)data = val; break;
+ case 2: *(u16 *)data = val; break;
+ case 1: *(u8 *)data = val; break;
+ }
+ } else {
+ /* Store LE value into 'data'. */
+ switch (bytes) {
+ case 4: st_le32(data, val); break;
+ case 2: st_le16(data, val); break;
+ case 1: *(u8 *)data = val; break;
+ }
+ }
+
+ return EMULATE_DO_MMIO;
+}
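+
+/* Concretely, for a 2-byte little-endian guest store of 0x1234, the
+ * st_le16() path leaves the buffer holding the bytes as they would appear
+ * on the bus (assuming the classic ppc st_le16 helper, which writes the
+ * low byte first):
+ *
+ *	u8 buf[2];
+ *	st_le16((u16 *)buf, 0x1234);	// buf[0] == 0x34, buf[1] == 0x12
+ *
+ * so userspace can consume run->mmio.data without further swapping. */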
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int r;
+ sigset_t sigsaved;
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvmppc_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ } else if (vcpu->arch.dcr_needed) {
+ if (!vcpu->arch.dcr_is_write)
+ kvmppc_complete_dcr_load(vcpu, run);
+ vcpu->arch.dcr_needed = 0;
+ }
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ local_irq_disable();
+ kvm_guest_enter();
+ r = __kvmppc_vcpu_run(run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
+{
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ switch (ioctl) {
+ case KVM_INTERRUPT: {
+ struct kvm_interrupt irq;
+ r = -EFAULT;
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ goto out;
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return -ENOTSUPP;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5ccb579b81e4..d9e37f365b54 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -110,15 +110,6 @@ EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 18b8ebe930d5..5e1e8cf14e75 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -3,11 +3,12 @@
*
* Initial author: Xianghua Xiao <x.xiao@freescale.com>
* Recode: Jason Jin <jason.jin@freescale.com>
+ * York Sun <yorksun@freescale.com>
*
* Rewrite the interrupt routing. remove the 8259PIC support,
* All the integrated device in ULI use sideband interrupt.
*
- * Copyright 2007 Freescale Semiconductor Inc.
+ * Copyright 2008 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -38,6 +39,8 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+static unsigned char *pixis_bdcfg0, *pixis_arch;
+
static struct of_device_id __initdata mpc8610_ids[] = {
{ .compatible = "fsl,mpc8610-immr", },
{}
@@ -52,8 +55,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
}
machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
-static void __init
-mpc86xx_hpcd_init_irq(void)
+static void __init mpc86xx_hpcd_init_irq(void)
{
struct mpic *mpic1;
struct device_node *np;
@@ -161,12 +163,159 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5288, final_uli5288);
#endif /* CONFIG_PCI */
-static void __init
-mpc86xx_hpcd_setup_arch(void)
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+static u32 get_busfreq(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
+ struct device_node *node;
+
+ u32 fs_busfreq = 0;
+ node = of_find_node_by_type(NULL, "cpu");
+ if (node) {
+ unsigned int size;
+ const unsigned int *prop =
+ of_get_property(node, "bus-frequency", &size);
+ if (prop)
+ fs_busfreq = *prop;
+ of_node_put(node);
+ }
+ return fs_busfreq;
+}
+
+unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
+ int monitor_port)
+{
+ static const unsigned long pixelformat[][3] = {
+ {0x88882317, 0x88083218, 0x65052119},
+ {0x88883316, 0x88082219, 0x65053118},
+ };
+ unsigned int pix_fmt, arch_monitor;
+
+ /* Board version 0x01 routes monitor port 0 to the DVI port. */
+ arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0)) ? 0 : 1;
+
+ if (bits_per_pixel == 32)
+ pix_fmt = pixelformat[arch_monitor][0];
+ else if (bits_per_pixel == 24)
+ pix_fmt = pixelformat[arch_monitor][1];
+ else if (bits_per_pixel == 16)
+ pix_fmt = pixelformat[arch_monitor][2];
+ else
+ pix_fmt = pixelformat[1][0];
+
+ return pix_fmt;
+}
+
+void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
+{
+ int i;
+ if (monitor_port == 2) { /* dual link LVDS */
+ for (i = 0; i < 256*3; i++)
+ gamma_table_base[i] = (gamma_table_base[i] << 2) |
+ ((gamma_table_base[i] >> 6) & 0x03);
+ }
+}
+
+void mpc8610hpcd_set_monitor_port(int monitor_port)
+{
+ static const u8 bdcfg[] = {0xBD, 0xB5, 0xA5};
+ if (monitor_port < 3)
+ *pixis_bdcfg0 = bdcfg[monitor_port];
+}
+
+void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
+{
+ u32 __iomem *clkdvdr;
+ u32 temp;
+ /* variables for pixel clock calcs */
+ ulong bestval, bestfreq, speed_ccb, minpixclock, maxpixclock;
+ ulong pixval;
+ long err;
+ int i;
+
+ clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32));
+ if (!clkdvdr) {
+ printk(KERN_ERR "Err: can't map clock divider register!\n");
+ return;
+ }
+
+ /* Pixel Clock configuration */
+ pr_debug("DIU: Bus Frequency = %d\n", get_busfreq());
+ speed_ccb = get_busfreq();
+
+ /* Calculate the pixel clock with the smallest error */
+ /* calculate the following in steps to avoid overflow */
+ pr_debug("DIU pixclock in ps - %d\n", pixclock);
+ temp = 1000000000/pixclock;
+ temp *= 1000;
+ pixclock = temp;
+ pr_debug("DIU pixclock freq - %u\n", pixclock);
+
+ temp = pixclock * 5 / 100;
+ pr_debug("deviation = %d\n", temp);
+ minpixclock = pixclock - temp;
+ maxpixclock = pixclock + temp;
+ pr_debug("DIU minpixclock - %lu\n", minpixclock);
+ pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
+ pixval = speed_ccb/pixclock;
+ pr_debug("DIU pixval = %lu\n", pixval);
+
+ err = 100000000;
+ bestval = pixval;
+ pr_debug("DIU bestval = %lu\n", bestval);
+
+ bestfreq = 0;
+ for (i = -1; i <= 1; i++) {
+ temp = speed_ccb / ((pixval+i) + 1);
+ pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
+ i, pixval, temp);
+ if ((temp < minpixclock) || (temp > maxpixclock))
+ pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
+ minpixclock, maxpixclock);
+ else if (abs(temp - pixclock) < err) {
+ pr_debug("Entered the else if block %d\n", i);
+ err = abs(temp - pixclock);
+ bestval = pixval+i;
+ bestfreq = temp;
+ }
+ }
+
+ pr_debug("DIU chose = %lx\n", bestval);
+ pr_debug("DIU error = %ld\n NomPixClk ", err);
+ pr_debug("DIU: Best Freq = %lx\n", bestfreq);
+ /* Modify PXCLK in GUTS CLKDVDR */
+ pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+ temp = (*clkdvdr) & 0x2000FFFF;
+ *clkdvdr = temp; /* turn off clock */
+ *clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16);
+ pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+ iounmap(clkdvdr);
+}
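+
+/* A worked example, assuming a 528 MHz platform (CCB) clock: a 25.175 MHz
+ * mode is requested as pixclock = 39722 ps, which the code above inverts
+ * to 25174000 Hz; pixval = 528000000 / 25174000 = 20, and of the three
+ * candidate dividers the loop keeps bestval = 20 (CLKDVDR divides by
+ * bestval + 1 = 21, giving 25.14 MHz, comfortably inside the 5% window). */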
+
+ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "%c0 - DVI\n"
+ "%c1 - Single link LVDS\n"
+ "%c2 - Dual link LVDS\n",
+ monitor_port == 0 ? '*' : ' ',
+ monitor_port == 1 ? '*' : ' ',
+ monitor_port == 2 ? '*' : ' ');
+}
+
+int mpc8610hpcd_set_sysfs_monitor_port(int val)
+{
+ return val < 3 ? val : 0;
+}
+
#endif
+
+static void __init mpc86xx_hpcd_setup_arch(void)
+{
+ struct resource r;
+ struct device_node *np;
+ unsigned char *pixis;
+
if (ppc_md.progress)
ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
@@ -183,6 +332,30 @@ mpc86xx_hpcd_setup_arch(void)
}
}
#endif
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+ preallocate_diu_videomemory();
+ diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format;
+ diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table;
+ diu_ops.set_monitor_port = mpc8610hpcd_set_monitor_port;
+ diu_ops.set_pixel_clock = mpc8610hpcd_set_pixel_clock;
+ diu_ops.show_monitor_port = mpc8610hpcd_show_monitor_port;
+ diu_ops.set_sysfs_monitor_port = mpc8610hpcd_set_sysfs_monitor_port;
+#endif
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
+ if (np) {
+ of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ pixis = ioremap(r.start, 32);
+ if (!pixis) {
+ printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
+ return;
+ }
+ pixis_bdcfg0 = pixis + 8;
+ pixis_arch = pixis + 1;
+ } else
+ printk(KERN_ERR "Err: "
+ "can't find device node 'fsl,fpga-pixis'\n");
printk("MPC86xx HPCD board from Freescale Semiconductor\n");
}
@@ -200,8 +373,7 @@ static int __init mpc86xx_hpcd_probe(void)
return 0;
}
-static long __init
-mpc86xx_time_init(void)
+static long __init mpc86xx_time_init(void)
{
unsigned int temp;
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index d359d6e92975..7f59188cd9a1 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
*/
static int
axon_ram_direct_access(struct block_device *device, sector_t sector,
- unsigned long *data)
+ void **kaddr, unsigned long *pfn)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
loff_t offset;
@@ -154,7 +154,8 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
return -ERANGE;
}
- *data = bank->ph_addr + offset;
+ *kaddr = (void *)(bank->ph_addr + offset);
+ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
return 0;
}
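Note that kaddr is a void **, so it must be dereferenced before translation: virt_to_phys() applied to the pointer variable itself would translate the stack address of that pointer, not the bank memory. Spelled out:

    void *p = (void *)(bank->ph_addr + offset);  /* kernel virtual address */
    *kaddr = p;
    *pfn = virt_to_phys(p) >> PAGE_SHIFT;        /* frame of the memory, not of 'p' */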
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 5c1b246aaccc..7b45670c7af3 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -892,3 +892,44 @@ void fsl_rstcr_restart(char *cmd)
while (1) ;
}
#endif
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+struct platform_diu_data_ops diu_ops = {
+ .diu_size = 1280 * 1024 * 4, /* default one 1280x1024 buffer */
+};
+EXPORT_SYMBOL(diu_ops);
+
+int __init preallocate_diu_videomemory(void)
+{
+ pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+ diu_ops.diu_mem = __alloc_bootmem(diu_ops.diu_size, 8, 0);
+ if (!diu_ops.diu_mem) {
+ printk(KERN_ERR "fsl-diu: cannot allocate %lu bytes\n",
+ diu_ops.diu_size);
+ return -ENOMEM;
+ }
+
+ pr_debug("diu_mem=%p\n", diu_ops.diu_mem);
+
+ rh_init(&diu_ops.diu_rh_info, 4096, ARRAY_SIZE(diu_ops.diu_rh_block),
+ diu_ops.diu_rh_block);
+ return rh_attach_region(&diu_ops.diu_rh_info,
+ (unsigned long) diu_ops.diu_mem,
+ diu_ops.diu_size);
+}
+
+static int __init early_parse_diufb(char *p)
+{
+ if (!p)
+ return 1;
+
+ diu_ops.diu_size = _ALIGN_UP(memparse(p, &p), 8);
+
+ pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+ return 0;
+}
+early_param("diufb", early_parse_diufb);
+
+#endif
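Since the size is parsed with memparse(), the usual K/M/G suffixes work on the kernel command line; for example (illustrative value):

    diufb=10M

reserves 10 MiB of bootmem (rounded up to an 8-byte multiple) in place of the default single 1280x1024, 32bpp buffer.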
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 74c4a9657b33..52c831fa1886 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -17,5 +17,28 @@ extern int fsl_spi_init(struct spi_board_info *board_infos,
void (*deactivate_cs)(u8 cs, u8 polarity));
extern void fsl_rstcr_restart(char *cmd);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+#include <linux/bootmem.h>
+#include <asm/rheap.h>
+struct platform_diu_data_ops {
+ rh_block_t diu_rh_block[16];
+ rh_info_t diu_rh_info;
+ unsigned long diu_size;
+ void *diu_mem;
+
+ unsigned int (*get_pixel_format) (unsigned int bits_per_pixel,
+ int monitor_port);
+ void (*set_gamma_table) (int monitor_port, char *gamma_table_base);
+ void (*set_monitor_port) (int monitor_port);
+ void (*set_pixel_clock) (unsigned int pixclock);
+ ssize_t (*show_monitor_port) (int monitor_port, char *buf);
+ int (*set_sysfs_monitor_port) (int val);
+};
+
+extern struct platform_diu_data_ops diu_ops;
+int __init preallocate_diu_videomemory(void);
+#endif
+
#endif
#endif
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f6a68e178fc5..8f5f02160ffc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,10 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
+config PGSTE
+ bool
+ default y if KVM
+
mainmenu "Linux Kernel Configuration"
config S390
@@ -69,6 +73,7 @@ config S390
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_KVM if 64BIT
source "init/Kconfig"
@@ -515,6 +520,13 @@ config ZFCPDUMP
Select this option if you want to build a zfcpdump enabled kernel.
Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
+config S390_GUEST
+bool "s390 guest support (EXPERIMENTAL)"
+ depends on 64BIT && EXPERIMENTAL
+ select VIRTIO
+ select VIRTIO_RING
+ help
+ Select this option if you want to run the kernel as a guest under an s390 Linux host.
endmenu
source "net/Kconfig"
@@ -536,3 +548,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index f708be367b03..792a4e7743ce 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -87,7 +87,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 540a67f979b6..68ec4083bf73 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -144,6 +144,10 @@ static noinline __init void detect_machine_type(void)
/* Running on a P/390 ? */
if (cpuinfo->cpu_id.machine == 0x7490)
machine_flags |= 4;
+
+ /* Running under KVM ? */
+ if (cpuinfo->cpu_id.version == 0xfe)
+ machine_flags |= 64;
}
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7141147e6b63..a9d18aafa5f4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -316,7 +316,11 @@ static int __init early_parse_ipldelay(char *p)
early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
+#ifdef CONFIG_PGSTE
+unsigned int switch_amode = 1;
+#else
unsigned int switch_amode = 0;
+#endif
EXPORT_SYMBOL_GPL(switch_amode);
static void set_amode_and_uaccess(unsigned long user_amode,
@@ -797,9 +801,13 @@ setup_arch(char **cmdline_p)
"This machine has an IEEE fpu\n" :
"This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
- printk((MACHINE_IS_VM) ?
- "We are running under VM (64 bit mode)\n" :
- "We are running native (64 bit mode)\n");
+ if (MACHINE_IS_VM)
+ printk("We are running under VM (64 bit mode)\n");
+ else if (MACHINE_IS_KVM) {
+ printk("We are running under KVM (64 bit mode)\n");
+ add_preferred_console("ttyS", 1, NULL);
+ } else
+ printk("We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */
/* Save unparsed command line copy for /proc/cmdline */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c5f05b3fb2c3..ca90ee3f930e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -110,6 +110,7 @@ void account_system_vtime(struct task_struct *tsk)
S390_lowcore.steal_clock -= cputime << 12;
account_system_time(tsk, 0, cputime);
}
+EXPORT_SYMBOL_GPL(account_system_vtime);
static inline void set_vtimer(__u64 expires)
{
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
new file mode 100644
index 000000000000..1761b74d639b
--- /dev/null
+++ b/arch/s390/kvm/Kconfig
@@ -0,0 +1,46 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+ bool
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ default y
+ ---help---
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select S390_SWITCH_AMODE
+ select PREEMPT
+ ---help---
+ Support hosting paravirtualized guest machines using the SIE
+ virtualization capability on the mainframe. This should work
+ on any 64bit machine.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+config KVM_TRACE
+ bool
+
+# OK, it's a little counter-intuitive to do this, but it puts it neatly under
+# the virtualization menu.
+source "drivers/virtio/Kconfig"
+
+endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
new file mode 100644
index 000000000000..e5221ec0b8e3
--- /dev/null
+++ b/arch/s390/kvm/Makefile
@@ -0,0 +1,14 @@
+# Makefile for kernel virtual machines on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm
+
+kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
new file mode 100644
index 000000000000..f639a152869f
--- /dev/null
+++ b/arch/s390/kvm/diag.c
@@ -0,0 +1,67 @@
+/*
+ * diag.c - handling diagnose instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+
+static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
+ vcpu->stat.diagnose_44++;
+ vcpu_put(vcpu);
+ schedule();
+ vcpu_load(vcpu);
+ return 0;
+}
+
+static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
+{
+ unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
+ unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
+
+ VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+ switch (subcode) {
+ case 3:
+ vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
+ break;
+ case 4:
+ vcpu->run->s390_reset_flags = 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
+ vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
+ VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+ vcpu->run->s390_reset_flags);
+ return -EREMOTE;
+}
+
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
+{
+ int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+
+ switch (code) {
+ case 0x44:
+ return __diag_time_slice_end(vcpu);
+ case 0x308:
+ return __diag_ipl_functions(vcpu);
+ default:
+ return -ENOTSUPP;
+ }
+}
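On the guest side, diagnose 0x44 is the voluntary time-slice yield; a guest kernel issues it with inline assembly along these lines (a sketch matching the form s390 Linux already uses when running under a hypervisor):

    static inline void yield_to_hypervisor(void)
    {
        asm volatile("diag 0,0,0x44");  /* give up the remaining time slice */
    }

The handler above simply calls schedule() on the host, so the backing host task yields exactly as the guest requested.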
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
new file mode 100644
index 000000000000..4e0633c413f3
--- /dev/null
+++ b/arch/s390/kvm/gaccess.h
@@ -0,0 +1,274 @@
+/*
+ * gaccess.h - access guest memory
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_GACCESS_H
+#define __KVM_S390_GACCESS_H
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/uaccess.h>
+
+static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
+ u64 guestaddr)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestaddr < 2 * PAGE_SIZE)
+ guestaddr += prefix;
+ else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
+ guestaddr -= prefix;
+
+ if (guestaddr > memsize)
+ return (void __user __force *) ERR_PTR(-EFAULT);
+
+ guestaddr += origin;
+
+ return (void __user *) guestaddr;
+}
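The two conditionals implement z/Architecture prefixing: the first 8KB (two pages) of guest absolute storage and the 8KB at the prefix address swap places. With an illustrative prefix of 0x20000:

    /* guestaddr 0x00000..0x01fff  ->  0x20000..0x21fff
     * guestaddr 0x20000..0x21fff  ->  0x00000..0x01fff
     * any other guestaddr         ->  unchanged
     * 'origin' is then added to obtain the userspace address.
     */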
+
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u64 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 7);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u64 __user *) uptr);
+}
+
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u32 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 3);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u32 __user *) uptr);
+}
+
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u16 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 1);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u16 __user *) uptr);
+}
+
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u8 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u8 __user *) uptr);
+}
+
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u64 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 7);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u64 __user *) uptr);
+}
+
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u32 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 3);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u32 __user *) uptr);
+}
+
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u16 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 1);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u16 __user *) uptr);
+}
+
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u8 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u8 __user *) uptr);
+}
+
+
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ int rc;
+ unsigned long i;
+ const u8 *data = from;
+
+ for (i = 0; i < n; i++) {
+ rc = put_guest_u8(vcpu, guestdest++, *(data++));
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if ((guestdest < prefix) && (guestdest + n > prefix))
+ goto slowpath;
+
+ if ((guestdest < prefix + 2 * PAGE_SIZE)
+ && (guestdest + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestdest < 2 * PAGE_SIZE)
+ guestdest += prefix;
+ else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
+ guestdest -= prefix;
+
+ if (guestdest + n > memsize)
+ return -EFAULT;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ guestdest += origin;
+
+ return copy_to_user((void __user *) guestdest, from, n);
+slowpath:
+ return __copy_to_guest_slow(vcpu, guestdest, from, n);
+}
+
+static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ int rc;
+ unsigned long i;
+ u8 *data = to;
+
+ for (i = 0; i < n; i++) {
+ rc = get_guest_u8(vcpu, guestsrc++, data++);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if ((guestsrc < prefix) && (guestsrc + n > prefix))
+ goto slowpath;
+
+ if ((guestsrc < prefix + 2 * PAGE_SIZE)
+ && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestsrc < 2 * PAGE_SIZE)
+ guestsrc += prefix;
+ else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+ guestsrc -= prefix;
+
+ if (guestsrc + n > memsize)
+ return -EFAULT;
+
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
+
+ guestsrc += origin;
+
+ return copy_from_user(to, (void __user *) guestsrc, n);
+slowpath:
+ return __copy_from_guest_slow(vcpu, to, guestsrc, n);
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestdest + n > memsize)
+ return -EFAULT;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ guestdest += origin;
+
+ return copy_to_user((void __user *) guestdest, from, n);
+}
+
+static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestsrc + n > memsize)
+ return -EFAULT;
+
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
+
+ guestsrc += origin;
+
+ return copy_from_user(to, (void __user *) guestsrc, n);
+}
+#endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
new file mode 100644
index 000000000000..349581a26103
--- /dev/null
+++ b/arch/s390/kvm/intercept.c
@@ -0,0 +1,216 @@
+/*
+ * intercept.c - in-kernel handling for sie intercepts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+
+#include <asm/kvm_host.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+static int handle_lctg(struct kvm_vcpu *vcpu)
+{
+ int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+ ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+ u64 useraddr;
+ int reg, rc;
+
+ vcpu->stat.instruction_lctg++;
+ if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
+ return -ENOTSUPP;
+
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ reg = reg1;
+
+ VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+ disp2);
+
+ do {
+ rc = get_guest_u64(vcpu, useraddr,
+ &vcpu->arch.sie_block->gcr[reg]);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ break;
+ }
+ useraddr += 8;
+ if (reg == reg3)
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+ return 0;
+}
+
+static int handle_lctl(struct kvm_vcpu *vcpu)
+{
+ int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 useraddr;
+ u32 val = 0;
+ int reg, rc;
+
+ vcpu->stat.instruction_lctl++;
+
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+ disp2);
+
+ reg = reg1;
+ do {
+ rc = get_guest_u32(vcpu, useraddr, &val);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ break;
+ }
+ vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
+ vcpu->arch.sie_block->gcr[reg] |= val;
+ useraddr += 4;
+ if (reg == reg3)
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+ return 0;
+}
+
+static intercept_handler_t instruction_handlers[256] = {
+ [0x83] = kvm_s390_handle_diag,
+ [0xae] = kvm_s390_handle_sigp,
+ [0xb2] = kvm_s390_handle_priv,
+ [0xb7] = handle_lctl,
+ [0xeb] = handle_lctg,
+};
+
+static int handle_noop(struct kvm_vcpu *vcpu)
+{
+ switch (vcpu->arch.sie_block->icptcode) {
+ case 0x10:
+ vcpu->stat.exit_external_request++;
+ break;
+ case 0x14:
+ vcpu->stat.exit_external_interrupt++;
+ break;
+ default:
+ break; /* nothing */
+ }
+ return 0;
+}
+
+static int handle_stop(struct kvm_vcpu *vcpu)
+{
+ int rc;
+
+ vcpu->stat.exit_stop_request++;
+ atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+ rc = __kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ if (rc >= 0)
+ rc = -ENOTSUPP;
+ }
+
+ if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
+ VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
+ rc = -ENOTSUPP;
+ } else
+ rc = 0;
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ return rc;
+}
+
+static int handle_validity(struct kvm_vcpu *vcpu)
+{
+ int viwhy = vcpu->arch.sie_block->ipb >> 16;
+ vcpu->stat.exit_validity++;
+ if (viwhy == 0x37) {
+ fault_in_pages_writeable((char __user *)
+ vcpu->kvm->arch.guest_origin +
+ vcpu->arch.sie_block->prefix,
+ PAGE_SIZE);
+ return 0;
+ }
+ VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+ viwhy);
+ return -ENOTSUPP;
+}
+
+static int handle_instruction(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ vcpu->stat.exit_instruction++;
+ handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
+ if (handler)
+ return handler(vcpu);
+ return -ENOTSUPP;
+}
+
+static int handle_prog(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.exit_program_interruption++;
+ return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
+}
+
+static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
+{
+ int rc, rc2;
+
+ vcpu->stat.exit_instr_and_program++;
+ rc = handle_instruction(vcpu);
+ rc2 = handle_prog(vcpu);
+
+ if (rc == -ENOTSUPP)
+ vcpu->arch.sie_block->icptcode = 0x04;
+ if (rc)
+ return rc;
+ return rc2;
+}
+
+static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
+ [0x00 >> 2] = handle_noop,
+ [0x04 >> 2] = handle_instruction,
+ [0x08 >> 2] = handle_prog,
+ [0x0C >> 2] = handle_instruction_and_prog,
+ [0x10 >> 2] = handle_noop,
+ [0x14 >> 2] = handle_noop,
+ [0x1C >> 2] = kvm_s390_handle_wait,
+ [0x20 >> 2] = handle_validity,
+ [0x28 >> 2] = handle_stop,
+};
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t func;
+ u8 code = vcpu->arch.sie_block->icptcode;
+
+ if (code & 3 || code > 0x48)
+ return -ENOTSUPP;
+ func = intercept_funcs[code >> 2];
+ if (func)
+ return func(vcpu);
+ return -ENOTSUPP;
+}
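Valid intercept codes are multiples of four, so the table is indexed by code >> 2. For example:

    /* icptcode 0x08 (program interruption):
     *   0x08 & 3 == 0 and 0x08 < 0x48, so the dispatch is
     *   intercept_funcs[0x08 >> 2] == intercept_funcs[2] == handle_prog
     */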
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644
index 000000000000..fcd1ed8015c1
--- /dev/null
+++ b/arch/s390/kvm/interrupt.c
@@ -0,0 +1,592 @@
+/*
+ * interrupt.c - handling kvm guest interrupts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#include <asm/lowcore.h>
+#include <asm/uaccess.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
+}
+
+static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
+ return 0;
+ return 1;
+}
+
+static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
+ return 1;
+ return 0;
+ case KVM_S390_INT_SERVICE:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+ return 1;
+ return 0;
+ case KVM_S390_INT_VIRTIO:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+ return 1;
+ return 0;
+ case KVM_S390_PROGRAM_INT:
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_SIGP_SET_PREFIX:
+ case KVM_S390_RESTART:
+ return 1;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+ atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+ atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+{
+ atomic_clear_mask(CPUSTAT_ECALL_PEND |
+ CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+ &vcpu->arch.sie_block->cpuflags);
+ vcpu->arch.sie_block->lctl = 0x0000;
+}
+
+static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
+{
+ atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+}
+
+static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ case KVM_S390_INT_SERVICE:
+ case KVM_S390_INT_VIRTIO:
+ if (psw_extint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR0;
+ break;
+ case KVM_S390_SIGP_STOP:
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ const unsigned short table[] = { 2, 4, 4, 6 };
+ int rc, exception = 0;
+
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+ vcpu->stat.deliver_emergency_signal++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_SERVICE:
+ VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+ inti->ext.ext_params);
+ vcpu->stat.deliver_service_signal++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_VIRTIO:
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
+ inti->ext.ext_params2);
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_SIGP_STOP:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+ vcpu->stat.deliver_stop_signal++;
+ __set_intercept_indicator(vcpu, inti);
+ break;
+
+ case KVM_S390_SIGP_SET_PREFIX:
+ VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
+ inti->prefix.address);
+ vcpu->stat.deliver_prefix_signal++;
+ vcpu->arch.sie_block->prefix = inti->prefix.address;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+ break;
+
+ case KVM_S390_RESTART:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+ vcpu->stat.deliver_restart_signal++;
+ rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
+ restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_PROGRAM_INT:
+ VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+ inti->pgm.code,
+ table[vcpu->arch.sie_block->ipa >> 14]);
+ vcpu->stat.deliver_program_int++;
+ rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_PGM_ILC,
+ table[vcpu->arch.sie_block->ipa >> 14]);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_PGM_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ default:
+ BUG();
+ }
+
+ if (exception) {
+ VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
+ " interrupt");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (inti->type == KVM_S390_PROGRAM_INT) {
+ printk(KERN_WARNING "kvm: recursive program check\n");
+ BUG();
+ }
+ }
+}
+
+static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
+{
+ int rc, exception = 0;
+
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ return 0;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
+ if (rc == -EFAULT)
+ exception = 1;
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ if (exception) {
+ VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
+ " ckc interrupt");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ return 0;
+ }
+
+ return 1;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct interrupt_info *inti;
+ int rc = 0;
+
+ if (atomic_read(&li->active)) {
+ spin_lock_bh(&li->lock);
+ list_for_each_entry(inti, &li->list, list)
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ rc = 1;
+ break;
+ }
+ spin_unlock_bh(&li->lock);
+ }
+
+ if ((!rc) && atomic_read(&fi->active)) {
+ spin_lock_bh(&fi->lock);
+ list_for_each_entry(inti, &fi->list, list)
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ rc = 1;
+ break;
+ }
+ spin_unlock_bh(&fi->lock);
+ }
+
+ if ((!rc) && (vcpu->arch.sie_block->ckc <
+ get_clock() + vcpu->arch.sie_block->epoch)) {
+ if ((!psw_extint_disabled(vcpu)) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ rc = 1;
+ }
+
+ return rc;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+{
+ u64 now, sltime;
+ DECLARE_WAITQUEUE(wait, current);
+
+ vcpu->stat.exit_wait_state++;
+ if (kvm_cpu_has_interrupt(vcpu))
+ return 0;
+
+ if (psw_interrupts_disabled(vcpu)) {
+ VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
+ __unset_cpu_idle(vcpu);
+ return -ENOTSUPP; /* disabled wait */
+ }
+
+ if (psw_extint_disabled(vcpu) ||
+ (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
+ VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+ goto no_timer;
+ }
+
+ now = get_clock() + vcpu->arch.sie_block->epoch;
+ if (vcpu->arch.sie_block->ckc < now) {
+ __unset_cpu_idle(vcpu);
+ return 0;
+ }
+
+ sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+
+ vcpu->arch.ckc_timer.expires = jiffies + sltime;
+
+ add_timer(&vcpu->arch.ckc_timer);
+ VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+no_timer:
+ spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ __set_cpu_idle(vcpu);
+ vcpu->arch.local_int.timer_due = 0;
+ add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+ while (list_empty(&vcpu->arch.local_int.list) &&
+ list_empty(&vcpu->arch.local_int.float_int->list) &&
+ (!vcpu->arch.local_int.timer_due) &&
+ !signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+ vcpu_put(vcpu);
+ schedule();
+ vcpu_load(vcpu);
+ spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ }
+ __unset_cpu_idle(vcpu);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&vcpu->wq, &wait);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+ del_timer(&vcpu->arch.ckc_timer);
+ return 0;
+}
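The magic divisor converts clock-comparator (TOD) units into jiffies: bit 51 of the z/Architecture TOD clock ticks once per microsecond, i.e. 1 us = 4096 units, so:

    /* 0xf4240  = 1,000,000 microseconds per second
     * 1 second = 1,000,000 * 4096 = 0xf4240000 TOD units
     * sltime   = (ckc - now) / (TOD units per jiffy) + 1   [jiffies],
     * the +1 rounding up so the timer never fires early.
     */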
+
+void kvm_s390_idle_wakeup(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ vcpu->arch.local_int.timer_due = 1;
+ if (waitqueue_active(&vcpu->arch.local_int.wq))
+ wake_up_interruptible(&vcpu->arch.local_int.wq);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+}
+
+
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct interrupt_info *n, *inti = NULL;
+ int deliver;
+
+ __reset_intercept_indicators(vcpu);
+ if (atomic_read(&li->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&li->lock);
+ list_for_each_entry_safe(inti, n, &li->list, list) {
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&li->list))
+ atomic_set(&li->active, 0);
+ spin_unlock_bh(&li->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+
+ if ((vcpu->arch.sie_block->ckc <
+ get_clock() + vcpu->arch.sie_block->epoch))
+ __try_deliver_ckc_interrupt(vcpu);
+
+ if (atomic_read(&fi->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&fi->lock);
+ list_for_each_entry_safe(inti, n, &fi->list, list) {
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock_bh(&fi->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+}
+
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct interrupt_info *inti;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_PROGRAM_INT;;
+ inti->pgm.code = code;
+
+ VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
+ spin_lock_bh(&li->lock);
+ list_add(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ BUG_ON(waitqueue_active(&li->wq));
+ spin_unlock_bh(&li->lock);
+ return 0;
+}
+
+int kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int)
+{
+ struct local_interrupt *li;
+ struct float_interrupt *fi;
+ struct interrupt_info *inti;
+ int sigcpu;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ switch (s390int->type) {
+ case KVM_S390_INT_VIRTIO:
+ VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+ s390int->parm, s390int->parm64);
+ inti->type = s390int->type;
+ inti->ext.ext_params = s390int->parm;
+ inti->ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_INT_SERVICE:
+ VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+ inti->type = s390int->type;
+ inti->ext.ext_params = s390int->parm;
+ break;
+ case KVM_S390_PROGRAM_INT:
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_INT_EMERGENCY:
+ default:
+ kfree(inti);
+ return -EINVAL;
+ }
+
+ mutex_lock(&kvm->lock);
+ fi = &kvm->arch.float_int;
+ spin_lock_bh(&fi->lock);
+ list_add_tail(&inti->list, &fi->list);
+ atomic_set(&fi->active, 1);
+ sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+ if (sigcpu == KVM_MAX_VCPUS) {
+ do {
+ sigcpu = fi->next_rr_cpu++;
+ if (sigcpu == KVM_MAX_VCPUS)
+ sigcpu = fi->next_rr_cpu = 0;
+ } while (fi->local_int[sigcpu] == NULL);
+ }
+ li = fi->local_int[sigcpu];
+ spin_lock_bh(&li->lock);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ spin_unlock_bh(&fi->lock);
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt *s390int)
+{
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ switch (s390int->type) {
+ case KVM_S390_PROGRAM_INT:
+ if (s390int->parm & 0xffff0000) {
+ kfree(inti);
+ return -EINVAL;
+ }
+ inti->type = s390int->type;
+ inti->pgm.code = s390int->parm;
+ VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
+ s390int->parm);
+ break;
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_RESTART:
+ case KVM_S390_SIGP_SET_PREFIX:
+ case KVM_S390_INT_EMERGENCY:
+ VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
+ inti->type = s390int->type;
+ break;
+ case KVM_S390_INT_VIRTIO:
+ case KVM_S390_INT_SERVICE:
+ default:
+ kfree(inti);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+ li = &vcpu->arch.local_int;
+ spin_lock_bh(&li->lock);
+ if (inti->type == KVM_S390_PROGRAM_INT)
+ list_add(&inti->list, &li->list);
+ else
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ if (inti->type == KVM_S390_SIGP_STOP)
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&vcpu->arch.local_int.wq);
+ spin_unlock_bh(&li->lock);
+ mutex_unlock(&vcpu->kvm->lock);
+ return 0;
+}
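For context, userspace drives both injection paths through the KVM_S390_INTERRUPT ioctl added by this series; a minimal sketch (vm_fd and vcpu_fd are illustrative file descriptors):

    struct kvm_s390_interrupt irq = {
        .type   = KVM_S390_INT_VIRTIO,   /* floating: inject via the VM fd */
        .parm   = 0,
        .parm64 = 0,                     /* hypothetical parameter area */
    };
    if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)
        perror("KVM_S390_INTERRUPT (vm)");

    irq.type = KVM_S390_RESTART;         /* per-cpu: inject via a vcpu fd */
    if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq) < 0)
        perror("KVM_S390_INTERRUPT (vcpu)");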
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
new file mode 100644
index 000000000000..98d1e73e01f1
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.c
@@ -0,0 +1,685 @@
+/*
+ * kvm-s390.c - hosting zSeries kernel virtual machines
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <asm/lowcore.h>
+#include <asm/pgtable.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "userspace_handled", VCPU_STAT(exit_userspace) },
+ { "exit_validity", VCPU_STAT(exit_validity) },
+ { "exit_stop_request", VCPU_STAT(exit_stop_request) },
+ { "exit_external_request", VCPU_STAT(exit_external_request) },
+ { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
+ { "exit_instruction", VCPU_STAT(exit_instruction) },
+ { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
+ { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+ { "instruction_lctg", VCPU_STAT(instruction_lctg) },
+ { "instruction_lctl", VCPU_STAT(instruction_lctl) },
+ { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
+ { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
+ { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
+ { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
+ { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
+ { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
+ { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "exit_wait_state", VCPU_STAT(exit_wait_state) },
+ { "instruction_stidp", VCPU_STAT(instruction_stidp) },
+ { "instruction_spx", VCPU_STAT(instruction_spx) },
+ { "instruction_stpx", VCPU_STAT(instruction_stpx) },
+ { "instruction_stap", VCPU_STAT(instruction_stap) },
+ { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+ { "instruction_stsch", VCPU_STAT(instruction_stsch) },
+ { "instruction_chsc", VCPU_STAT(instruction_chsc) },
+ { "instruction_stsi", VCPU_STAT(instruction_stsi) },
+ { "instruction_stfl", VCPU_STAT(instruction_stfl) },
+ { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+ { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+ { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+ { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
+ { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
+ { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+ { "diagnose_44", VCPU_STAT(diagnose_44) },
+ { NULL }
+};
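Once the module is loaded, these counters surface under the generic KVM debugfs directory, e.g. (illustrative output):

    # cat /sys/kernel/debug/kvm/exit_wait_state
    42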
+
+
+/* Section: not file related */
+void kvm_arch_hardware_enable(void *garbage)
+{
+ /* every s390 is virtualization enabled ;-) */
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+}
+
+int kvm_arch_init(void *opaque)
+{
+ return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+/* Section: device related */
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ if (ioctl == KVM_S390_ENABLE_SIE)
+ return s390_enable_sie();
+ return -EINVAL;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ return 0;
+}
+
+/* Section: vm related */
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ return 0;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r;
+
+ switch (ioctl) {
+ case KVM_S390_INTERRUPT: {
+ struct kvm_s390_interrupt s390int;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390int, argp, sizeof(s390int)))
+ break;
+ r = kvm_s390_inject_vm(kvm, &s390int);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm;
+ int rc;
+ char debug_name[16];
+
+ rc = s390_enable_sie();
+ if (rc)
+ goto out_nokvm;
+
+ rc = -ENOMEM;
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ goto out_nokvm;
+
+ kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+ if (!kvm->arch.sca)
+ goto out_nosca;
+
+ sprintf(debug_name, "kvm-%u", current->pid);
+
+ kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+ if (!kvm->arch.dbf)
+ goto out_nodbf;
+
+ spin_lock_init(&kvm->arch.float_int.lock);
+ INIT_LIST_HEAD(&kvm->arch.float_int.list);
+
+ debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
+ VM_EVENT(kvm, 3, "%s", "vm created");
+
+ try_module_get(THIS_MODULE);
+
+ return kvm;
+out_nodbf:
+ free_page((unsigned long)(kvm->arch.sca));
+out_nosca:
+ kfree(kvm);
+out_nokvm:
+ return ERR_PTR(rc);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ debug_unregister(kvm->arch.dbf);
+ free_page((unsigned long)(kvm->arch.sca));
+ kfree(kvm);
+ module_put(THIS_MODULE);
+}
+
+/* Section: vcpu related */
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but doesn't call it */
+ BUG();
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ save_fp_regs(&vcpu->arch.host_fpregs);
+ save_access_regs(vcpu->arch.host_acrs);
+ vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
+ restore_fp_regs(&vcpu->arch.guest_fpregs);
+ restore_access_regs(vcpu->arch.guest_acrs);
+
+ if (signal_pending(current))
+ atomic_set_mask(CPUSTAT_STOP_INT,
+ &vcpu->arch.sie_block->cpuflags);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_access_regs(vcpu->arch.guest_acrs);
+ restore_fp_regs(&vcpu->arch.host_fpregs);
+ restore_access_regs(vcpu->arch.host_acrs);
+}
+
+static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+{
+ /* this equals initial cpu reset in pop, but we don't switch to ESA */
+ vcpu->arch.sie_block->gpsw.mask = 0UL;
+ vcpu->arch.sie_block->gpsw.addr = 0UL;
+ vcpu->arch.sie_block->prefix = 0UL;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+ vcpu->arch.sie_block->cputm = 0UL;
+ vcpu->arch.sie_block->ckc = 0UL;
+ vcpu->arch.sie_block->todpr = 0;
+ memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ vcpu->arch.sie_block->gcr[0] = 0xE0UL;
+ vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+ vcpu->arch.guest_fpregs.fpc = 0;
+ asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+ vcpu->arch.sie_block->gbea = 1;
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
+ vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
+ vcpu->arch.sie_block->gmsor = 0x000000000000;
+ vcpu->arch.sie_block->ecb = 2;
+ vcpu->arch.sie_block->eca = 0xC1002001U;
+ setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
+ (unsigned long) vcpu);
+ get_cpu_id(&vcpu->arch.cpu_id);
+ vcpu->arch.cpu_id.version = 0xfe;
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ int rc = -ENOMEM;
+
+ if (!vcpu)
+ goto out_nomem;
+
+ vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+
+ if (!vcpu->arch.sie_block)
+ goto out_free_cpu;
+
+ vcpu->arch.sie_block->icpua = id;
+ BUG_ON(!kvm->arch.sca);
+ BUG_ON(kvm->arch.sca->cpu[id].sda);
+ kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+
+ spin_lock_init(&vcpu->arch.local_int.lock);
+ INIT_LIST_HEAD(&vcpu->arch.local_int.list);
+ vcpu->arch.local_int.float_int = &kvm->arch.float_int;
+ spin_lock_bh(&kvm->arch.float_int.lock);
+ kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
+ init_waitqueue_head(&vcpu->arch.local_int.wq);
+ vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+ spin_unlock_bh(&kvm->arch.float_int.lock);
+
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+ goto out_free_cpu;
+ VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+ vcpu->arch.sie_block);
+
+ try_module_get(THIS_MODULE);
+
+ return vcpu;
+out_free_cpu:
+ kfree(vcpu);
+out_nomem:
+ return ERR_PTR(rc);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
+ free_page((unsigned long)(vcpu->arch.sie_block));
+ kfree(vcpu);
+ module_put(THIS_MODULE);
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but never calls it */
+ BUG();
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_s390_vcpu_initial_reset(vcpu);
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_load(vcpu);
+ memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+ memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ vcpu_load(vcpu);
+ memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+ memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_load(vcpu);
+ memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+ fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
+{
+ int rc = 0;
+
+ vcpu_load(vcpu);
+ if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+ rc = -EBUSY;
+ else
+ vcpu->arch.sie_block->gpsw = psw;
+ vcpu_put(vcpu);
+ return rc;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+static void __vcpu_run(struct kvm_vcpu *vcpu)
+{
+ memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+
+ if (need_resched())
+ schedule();
+
+ vcpu->arch.sie_block->icptcode = 0;
+ local_irq_disable();
+ kvm_guest_enter();
+ local_irq_enable();
+ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+ atomic_read(&vcpu->arch.sie_block->cpuflags));
+ sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ local_irq_disable();
+ kvm_guest_exit();
+ local_irq_enable();
+
+ memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int rc;
+ sigset_t sigsaved;
+
+ vcpu_load(vcpu);
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+
+ BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
+
+ switch (kvm_run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
+ vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
+ break;
+ case KVM_EXIT_UNKNOWN:
+ case KVM_EXIT_S390_RESET:
+ break;
+ default:
+ BUG();
+ }
+
+ might_sleep();
+
+ do {
+ kvm_s390_deliver_pending_interrupts(vcpu);
+ __vcpu_run(vcpu);
+ rc = kvm_handle_sie_intercept(vcpu);
+ } while (!signal_pending(current) && !rc);
+
+ if (signal_pending(current) && !rc)
+ rc = -EINTR;
+
+ if (rc == -ENOTSUPP) {
+ /* intercept cannot be handled in-kernel, prepare kvm-run */
+ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
+ kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+ kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
+ kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
+ kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
+ kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
+ rc = 0;
+ }
+
+ if (rc == -EREMOTE) {
+ /* intercept was handled, but userspace support is needed
+ * kvm_run has been prepared by the handler */
+ rc = 0;
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ vcpu_put(vcpu);
+
+ vcpu->stat.exit_userspace++;
+ return rc;
+}
+
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+ unsigned long n, int prefix)
+{
+ if (prefix)
+ return copy_to_guest(vcpu, guestdest, from, n);
+ else
+ return copy_to_guest_absolute(vcpu, guestdest, from, n);
+}
+
+/*
+ * store status at address
+ * we have two special cases:
+ * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
+ * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
+ */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ const unsigned char archmode = 1;
+ int prefix;
+
+ if (addr == KVM_S390_STORE_STATUS_NOADDR) {
+ if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
+ return -EFAULT;
+ addr = SAVE_AREA_BASE;
+ prefix = 0;
+ } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
+ if (copy_to_guest(vcpu, 163ul, &archmode, 1))
+ return -EFAULT;
+ addr = SAVE_AREA_BASE;
+ prefix = 1;
+ } else
+ prefix = 0;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
+ vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
+ vcpu->arch.guest_gprs, 128, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
+ &vcpu->arch.sie_block->gpsw, 16, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
+ &vcpu->arch.sie_block->prefix, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu,
+ addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
+ &vcpu->arch.guest_fpregs.fpc, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
+ &vcpu->arch.sie_block->todpr, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
+ &vcpu->arch.sie_block->cputm, 8, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
+ &vcpu->arch.sie_block->ckc, 8, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
+ &vcpu->arch.guest_acrs, 64, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu,
+ addr + offsetof(struct save_area_s390x, ctrl_regs),
+ &vcpu->arch.sie_block->gcr, 128, prefix))
+ return -EFAULT;
+ return 0;
+}
+
+static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ int rc;
+
+ vcpu_load(vcpu);
+ rc = __kvm_s390_vcpu_store_status(vcpu, addr);
+ vcpu_put(vcpu);
+ return rc;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ switch (ioctl) {
+ case KVM_S390_INTERRUPT: {
+ struct kvm_s390_interrupt s390int;
+
+ if (copy_from_user(&s390int, argp, sizeof(s390int)))
+ return -EFAULT;
+ return kvm_s390_inject_vcpu(vcpu, &s390int);
+ }
+ case KVM_S390_STORE_STATUS:
+ return kvm_s390_vcpu_store_status(vcpu, arg);
+ case KVM_S390_SET_INITIAL_PSW: {
+ psw_t psw;
+
+ if (copy_from_user(&psw, argp, sizeof(psw)))
+ return -EFAULT;
+ return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+ }
+ case KVM_S390_INITIAL_RESET:
+ return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+ default:
+ ;
+ }
+ return -EINVAL;
+}
+
+/* Section: memory related */
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ /* A few sanity checks. We can have exactly one memory slot, which
+ has to start at guest physical zero and must both begin and end on
+ a page boundary in userland. The userland memory may be fragmented
+ into various different vmas. It is okay to mmap() and munmap() stuff
+ in this slot at any time after making this call. */
+
+ if (mem->slot)
+ return -EINVAL;
+
+ if (mem->guest_phys_addr)
+ return -EINVAL;
+
+ if (mem->userspace_addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (mem->memory_size & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ kvm->arch.guest_origin = mem->userspace_addr;
+ kvm->arch.guest_memsize = mem->memory_size;
+
+ /* FIXME: we do want to interrupt running CPUs and update their memory
+ configuration now to avoid race conditions. But hey, changing the
+ memory layout while virtual CPUs are running is usually bad
+ programming practice. */
+
+ return 0;
+}
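[Editor's note] For reference, a userspace call satisfying all four checks above might look like the sketch below; vm_fd is assumed to come from KVM_CREATE_VM, and size must be a multiple of the page size.

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	static int set_guest_memory(int vm_fd, size_t size)
	{
		void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (mem == MAP_FAILED)
			return -1;

		struct kvm_userspace_memory_region region = {
			.slot		= 0,	/* exactly one slot */
			.guest_phys_addr = 0,	/* must start at zero */
			.memory_size	= size,	/* page-aligned */
			.userspace_addr	= (unsigned long)mem, /* page-aligned */
		};
		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}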
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+static int __init kvm_s390_init(void)
+{
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvm_s390_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvm_s390_init);
+module_exit(kvm_s390_exit);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
new file mode 100644
index 000000000000..3893cf12eacf
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.h
@@ -0,0 +1,64 @@
+/*
+ * kvm-s390.h - definitions for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef ARCH_S390_KVM_S390_H
+#define ARCH_S390_KVM_S390_H
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+
+#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
+ d_args); \
+} while (0)
+
+#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
+ "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
+ d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
+ d_args); \
+} while (0)
+
+static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_idle_wakeup(unsigned long data);
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+int kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+
+/* implemented in priv.c */
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu);
+
+/* implemented in sigp.c */
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+
+/* implemented in kvm-s390.c */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
+ unsigned long addr);
+/* implemented in diag.c */
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
+
+#endif
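[Editor's note] The two event macros above wrap the s390 debug feature; a hedged usage sketch (log levels and messages are illustrative, and arch.dbf is assumed to have been set up at VM creation):

	/* VM_EVENT appends to the per-VM debug feature buffer (arch.dbf);
	 * VCPU_EVENT additionally records the vcpu id and guest PSW. */
	VM_EVENT(kvm, 3, "%s", "vm created");
	VCPU_EVENT(vcpu, 4, "intercept at %lx",
		   vcpu->arch.sie_block->gpsw.addr);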
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
new file mode 100644
index 000000000000..1465946325c5
--- /dev/null
+++ b/arch/s390/kvm/priv.c
@@ -0,0 +1,323 @@
+/*
+ * priv.c - handling privileged instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/errno.h>
+#include <asm/current.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+static int handle_set_prefix(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ u32 address = 0;
+ u8 tmp;
+
+ vcpu->stat.instruction_spx++;
+
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ /* must be word boundary */
+ if (operand2 & 3) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ /* get the value */
+ if (get_guest_u32(vcpu, operand2, &address)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ address = address & 0x7fffe000u;
+
+ /* make sure that the new value is valid memory */
+ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+ (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->prefix = address;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+
+ VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
+out:
+ return 0;
+}
+
+static int handle_store_prefix(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ u32 address;
+
+ vcpu->stat.instruction_stpx++;
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ /* must be word boundary */
+ if (operand2 & 3) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ address = vcpu->arch.sie_block->prefix;
+ address = address & 0x7fffe000u;
+
+	/* store the value */
+ if (put_guest_u32(vcpu, operand2, address)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+out:
+ return 0;
+}
+
+static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 useraddr;
+ int rc;
+
+ vcpu->stat.instruction_stap++;
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ if (useraddr & 1) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+out:
+ return 0;
+}
+
+static int handle_skey(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_storage_key++;
+ vcpu->arch.sie_block->gpsw.addr -= 4;
+ VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
+ return 0;
+}
+
+static int handle_stsch(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_stsch++;
+ VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+}
+
+static int handle_chsc(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_chsc++;
+ VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+}
+
+static unsigned int kvm_stfl(void)
+{
+ asm volatile(
+ " .insn s,0xb2b10000,0(0)\n" /* stfl */
+ "0:\n"
+ EX_TABLE(0b, 0b));
+ return S390_lowcore.stfl_fac_list;
+}
+
+static int handle_stfl(struct kvm_vcpu *vcpu)
+{
+ unsigned int facility_list = kvm_stfl();
+ int rc;
+
+ vcpu->stat.instruction_stfl++;
+ facility_list &= ~(1UL<<24); /* no stfle */
+
+ rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
+ &facility_list, sizeof(facility_list));
+ if (rc == -EFAULT)
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ else
+ VCPU_EVENT(vcpu, 5, "store facility list value %x",
+ facility_list);
+ return 0;
+}
+
+static int handle_stidp(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ int rc;
+
+ vcpu->stat.instruction_stidp++;
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ if (operand2 & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+out:
+ return 0;
+}
+
+static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ int cpus = 0;
+ int n;
+
+ spin_lock_bh(&fi->lock);
+ for (n = 0; n < KVM_MAX_VCPUS; n++)
+ if (fi->local_int[n])
+ cpus++;
+ spin_unlock_bh(&fi->lock);
+
+ /* deal with other level 3 hypervisors */
+ if (stsi(mem, 3, 2, 2) == -ENOSYS)
+ mem->count = 0;
+ if (mem->count < 8)
+ mem->count++;
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+ mem->vm[0].cpus_reserved = 0;
+ mem->vm[0].caf = 1000;
+ memcpy(mem->vm[0].name, "KVMguest", 8);
+ ASCEBC(mem->vm[0].name, 8);
+ memcpy(mem->vm[0].cpi, "KVM/Linux ", 16);
+ ASCEBC(mem->vm[0].cpi, 16);
+}
+
+static int handle_stsi(struct kvm_vcpu *vcpu)
+{
+ int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
+ int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
+ int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ unsigned long mem;
+
+ vcpu->stat.instruction_stsi++;
+ VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ if (operand2 & 0xfff && fc > 0)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ switch (fc) {
+ case 0:
+ vcpu->arch.guest_gprs[0] = 3 << 28;
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ return 0;
+ case 1: /* same handling for 1 and 2 */
+ case 2:
+ mem = get_zeroed_page(GFP_KERNEL);
+ if (!mem)
+ goto out_fail;
+ if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
+ goto out_mem;
+ break;
+ case 3:
+ if (sel1 != 2 || sel2 != 2)
+ goto out_fail;
+ mem = get_zeroed_page(GFP_KERNEL);
+ if (!mem)
+ goto out_fail;
+ handle_stsi_3_2_2(vcpu, (void *) mem);
+ break;
+ default:
+ goto out_fail;
+ }
+
+ if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out_mem;
+ }
+ free_page(mem);
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.guest_gprs[0] = 0;
+ return 0;
+out_mem:
+ free_page(mem);
+out_fail:
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
+ return 0;
+}
+
+static intercept_handler_t priv_handlers[256] = {
+ [0x02] = handle_stidp,
+ [0x10] = handle_set_prefix,
+ [0x11] = handle_store_prefix,
+ [0x12] = handle_store_cpu_address,
+ [0x29] = handle_skey,
+ [0x2a] = handle_skey,
+ [0x2b] = handle_skey,
+ [0x34] = handle_stsch,
+ [0x5f] = handle_chsc,
+ [0x7d] = handle_stsi,
+ [0xb1] = handle_stfl,
+};
+
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler)
+ return handler(vcpu);
+ return -ENOTSUPP;
+}
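[Editor's note] Every handler above repeats the same B2-format operand decode: the base register number sits in the top nibble of ipb and a 12-bit displacement in the next three nibbles. Purely as an illustration (this helper is not part of the patch), the shared decode factored out:

	static u64 decode_b2_operand(struct kvm_vcpu *vcpu)
	{
		int base2 = vcpu->arch.sie_block->ipb >> 28;
		int disp2 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
		u64 operand = disp2;

		if (base2)	/* base register 0 means "no base" */
			operand += vcpu->arch.guest_gprs[base2];
		return operand;
	}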
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
new file mode 100644
index 000000000000..934fd6a885f6
--- /dev/null
+++ b/arch/s390/kvm/sie64a.S
@@ -0,0 +1,47 @@
+/*
+ * sie64a.S - low-level SIE call
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <asm/asm-offsets.h>
+
+SP_R5 = 5 * 8 # offset into stackframe
+SP_R6 = 6 * 8
+
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ .globl sie64a
+sie64a:
+ lgr %r5,%r3
+ stmg %r5,%r14,SP_R5(%r15) # save register on entry
+ lgr %r14,%r2 # pointer to sie control block
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+sie_inst:
+ sie 0(%r14)
+ lg %r14,SP_R5(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lghi %r2,0
+ lmg %r6,%r14,SP_R6(%r15)
+ br %r14
+
+sie_err:
+ lg %r14,SP_R5(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lghi %r2,-EFAULT
+ lmg %r6,%r14,SP_R6(%r15)
+ br %r14
+
+ .section __ex_table,"a"
+ .quad sie_inst,sie_err
+ .previous
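[Editor's note] Per the calling convention documented above, the C side hands sie64a the SIE control block and the guest register save area. A hedged sketch of a call site; the actual caller lives in the interception loop, which is outside this hunk:

	extern int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);

	static int enter_guest(struct kvm_vcpu *vcpu)
	{
		/* 0 on interception exit, -EFAULT if the sie instruction faults */
		return sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	}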
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
new file mode 100644
index 000000000000..0a236acfb5f6
--- /dev/null
+++ b/arch/s390/kvm/sigp.c
@@ -0,0 +1,288 @@
+/*
+ * sigp.c - handling interprocessor communication
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+/* sigp order codes */
+#define SIGP_SENSE 0x01
+#define SIGP_EXTERNAL_CALL 0x02
+#define SIGP_EMERGENCY 0x03
+#define SIGP_START 0x04
+#define SIGP_STOP 0x05
+#define SIGP_RESTART 0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET 0x0c
+#define SIGP_SET_PREFIX 0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH 0x12
+
+/* cpu status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STAT_STOPPED 0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
+#define SIGP_STAT_CHECK_STOP 0x00000010UL
+#define SIGP_STAT_INOPERATIVE 0x00000004UL
+#define SIGP_STAT_INVALID_ORDER 0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+
+
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ spin_lock_bh(&fi->lock);
+ if (fi->local_int[cpu_addr] == NULL)
+ rc = 3; /* not operational */
+ else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+ & CPUSTAT_RUNNING) {
+ *reg &= 0xffffffff00000000UL;
+ rc = 1; /* status stored */
+ } else {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STAT_STOPPED;
+ rc = 1; /* status stored */
+ }
+ spin_unlock_bh(&fi->lock);
+
+ VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+ return rc;
+}
+
+static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_INT_EMERGENCY;
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+ if (li == NULL) {
+ rc = 3; /* not operational */
+ kfree(inti);
+ goto unlock;
+ }
+ spin_lock_bh(&li->lock);
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ rc = 0; /* order accepted */
+unlock:
+ spin_unlock_bh(&fi->lock);
+ VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+ return rc;
+}
+
+static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_SIGP_STOP;
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+ if (li == NULL) {
+ rc = 3; /* not operational */
+ kfree(inti);
+ goto unlock;
+ }
+ spin_lock_bh(&li->lock);
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ if (store)
+ li->action_bits |= ACTION_STORE_ON_STOP;
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ rc = 0; /* order accepted */
+unlock:
+ spin_unlock_bh(&fi->lock);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+ return rc;
+}
+
+static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
+{
+ int rc;
+
+ switch (parameter & 0xff) {
+ case 0:
+		printk(KERN_WARNING "kvm: request to switch to ESA/390 mode"
+				" not supported\n");
+ rc = 3; /* not operational */
+ break;
+ case 1:
+ case 2:
+ rc = 0; /* order accepted */
+ break;
+ default:
+ rc = -ENOTSUPP;
+ }
+ return rc;
+}
+
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
+ u64 *reg)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+ u8 tmp;
+
+ /* make sure that the new value is valid memory */
+ address = address & 0x7fffe000u;
+ if ((copy_from_guest(vcpu, &tmp,
+ (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+ (copy_from_guest(vcpu, &tmp, (u64) (address +
+ vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+ *reg |= SIGP_STAT_INVALID_PARAMETER;
+ return 1; /* invalid parameter */
+ }
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return 2; /* busy */
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+
+ if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
+ rc = 1; /* incorrect state */
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STAT_INCORRECT_STATE;
+ kfree(inti);
+ goto out_fi;
+ }
+
+ spin_lock_bh(&li->lock);
+ /* cpu must be in stopped state */
+ if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+ rc = 1; /* incorrect state */
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STAT_INCORRECT_STATE;
+ kfree(inti);
+ goto out_li;
+ }
+
+ inti->type = KVM_S390_SIGP_SET_PREFIX;
+ inti->prefix.address = address;
+
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ rc = 0; /* order accepted */
+
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+out_li:
+ spin_unlock_bh(&li->lock);
+out_fi:
+ spin_unlock_bh(&fi->lock);
+ return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+ int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u32 parameter;
+ u16 cpu_addr = vcpu->arch.guest_gprs[r3];
+ u8 order_code;
+ int rc;
+
+ order_code = disp2;
+ if (base2)
+ order_code += vcpu->arch.guest_gprs[base2];
+
+ if (r1 % 2)
+ parameter = vcpu->arch.guest_gprs[r1];
+ else
+ parameter = vcpu->arch.guest_gprs[r1 + 1];
+
+ switch (order_code) {
+ case SIGP_SENSE:
+ vcpu->stat.instruction_sigp_sense++;
+ rc = __sigp_sense(vcpu, cpu_addr,
+ &vcpu->arch.guest_gprs[r1]);
+ break;
+ case SIGP_EMERGENCY:
+ vcpu->stat.instruction_sigp_emergency++;
+ rc = __sigp_emergency(vcpu, cpu_addr);
+ break;
+ case SIGP_STOP:
+ vcpu->stat.instruction_sigp_stop++;
+ rc = __sigp_stop(vcpu, cpu_addr, 0);
+ break;
+ case SIGP_STOP_STORE_STATUS:
+ vcpu->stat.instruction_sigp_stop++;
+ rc = __sigp_stop(vcpu, cpu_addr, 1);
+ break;
+ case SIGP_SET_ARCH:
+ vcpu->stat.instruction_sigp_arch++;
+ rc = __sigp_set_arch(vcpu, parameter);
+ break;
+ case SIGP_SET_PREFIX:
+ vcpu->stat.instruction_sigp_prefix++;
+ rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
+ &vcpu->arch.guest_gprs[r1]);
+ break;
+ case SIGP_RESTART:
+ vcpu->stat.instruction_sigp_restart++;
+ /* user space must know about restart */
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (rc < 0)
+ return rc;
+
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
+ return 0;
+}
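[Editor's note] The tail of kvm_s390_handle_sigp() folds the handler's 0..3 return value into the guest condition code, which occupies PSW bits 18-19, i.e. bits 45:44 of the 64-bit mask word. The same idiom appears in priv.c above; as a standalone sketch:

	static void set_guest_cc(struct kvm_vcpu *vcpu, int cc)
	{
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);	/* clear CC */
		vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
	}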
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index fd072013f88c..5c1aea97cd12 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -30,11 +30,27 @@
#define TABLES_PER_PAGE 4
#define FRAG_MASK 15UL
#define SECOND_HALVES 10UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+ memset(table + 256, 0, PAGE_SIZE/4);
+ clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+ memset(table + 768, 0, PAGE_SIZE/4);
+}
+
#else
#define ALLOC_ORDER 2
#define TABLES_PER_PAGE 2
#define FRAG_MASK 3UL
#define SECOND_HALVES 2UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+ memset(table + 256, 0, PAGE_SIZE/2);
+}
+
#endif
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -153,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long *table;
unsigned long bits;
- bits = mm->context.noexec ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
@@ -170,7 +186,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+ if (mm->context.pgstes)
+ clear_table_pgstes(table);
+ else
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
spin_lock(&mm->page_table_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
@@ -191,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned long bits;
- bits = mm->context.noexec ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
@@ -228,3 +247,43 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
mm->context.noexec = 0;
update_mm(mm, tsk);
}
+
+/*
+ * switch on pgstes for the current userspace process (for kvm)
+ */
+int s390_enable_sie(void)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm;
+ int rc;
+
+ task_lock(tsk);
+
+ rc = 0;
+ if (tsk->mm->context.pgstes)
+ goto unlock;
+
+ rc = -EINVAL;
+ if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+ tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
+ goto unlock;
+
+ tsk->mm->context.pgstes = 1; /* dirty little tricks .. */
+ mm = dup_mm(tsk);
+ tsk->mm->context.pgstes = 0;
+
+ rc = -ENOMEM;
+ if (!mm)
+ goto unlock;
+ mmput(tsk->mm);
+ tsk->mm = tsk->active_mm = mm;
+ preempt_disable();
+ update_mm(mm, tsk);
+ cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ preempt_enable();
+ rc = 0;
+unlock:
+ task_unlock(tsk);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
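[Editor's note] s390_enable_sie() must run while the process still has a single mm user, so the natural caller is the VM-creation path, before any vcpus or guest mappings exist. A hedged sketch of such a caller (name and surrounding steps are illustrative):

	static int sketch_create_vm(void)
	{
		int rc = s390_enable_sie();	/* swap in a pgste-enabled mm */

		if (rc)
			return rc;	/* e.g. -EINVAL if mm already shared */
		/* ... proceed to allocate struct kvm, sca, debug feature ... */
		return 0;
	}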
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 53dde0607362..d7df26bd1e54 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -307,15 +307,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 177d8aaeec42..8c2b50e8abc6 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1699,9 +1699,21 @@ void __init paging_init(void)
* functions like clear_dcache_dirty_cpu use the cpu mask
* in 13-bit signed-immediate instruction fields.
*/
- BUILD_BUG_ON(FLAGS_RESERVED != 32);
+
+ /*
+ * Page flags must not reach into upper 32 bits that are used
+ * for the cpu number
+ */
+ BUILD_BUG_ON(NR_PAGEFLAGS > 32);
+
+ /*
+ * The bit fields placed in the high range must not reach below
+ * the 32 bit boundary. Otherwise we cannot place the cpu field
+ * at the 32 bit boundary.
+ */
BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
- ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
+ ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
+
BUILD_BUG_ON(NR_CPUS > 4096);
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index db3082b4da46..6e51424745ab 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -125,7 +125,7 @@ static int open_one_chan(struct chan *chan)
return 0;
}
-int open_chan(struct list_head *chans)
+static int open_chan(struct list_head *chans)
{
struct list_head *ele;
struct chan *chan;
@@ -583,19 +583,6 @@ int parse_chan_pair(char *str, struct line *line, int device,
return 0;
}
-int chan_out_fd(struct list_head *chans)
-{
- struct list_head *ele;
- struct chan *chan;
-
- list_for_each(ele, chans) {
- chan = list_entry(ele, struct chan, list);
- if (chan->primary && chan->output)
- return chan->fd;
- }
- return -1;
-}
-
void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq)
{
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 2c898c4d6b6a..10b86e1cc659 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -304,7 +304,7 @@ int line_ioctl(struct tty_struct *tty, struct file * file,
break;
if (i == ARRAY_SIZE(tty_ioctls)) {
printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
- __FUNCTION__, tty->name, cmd);
+ __func__, tty->name, cmd);
}
ret = -ENOIOCTLCMD;
break;
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index 822092f149be..8c4378a76d63 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -58,7 +58,7 @@ static const struct net_kern_info mcast_kern_info = {
.write = mcast_write,
};
-int mcast_setup(char *str, char **mac_out, void *data)
+static int mcast_setup(char *str, char **mac_out, void *data)
{
struct mcast_init *init = data;
char *port_str = NULL, *ttl_str = NULL, *remain;
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index 13af2f03ed84..f8cf4c8bedef 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -39,7 +39,7 @@ static struct mconsole_command commands[] = {
/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
-int mconsole_reply_v0(struct mc_request *req, char *reply)
+static int mconsole_reply_v0(struct mc_request *req, char *reply)
{
struct iovec iov;
struct msghdr msg;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 1d43bdfc20c4..5b4ca8d93682 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -116,7 +116,7 @@ static void uml_dev_close(struct work_struct *work)
dev_close(lp->dev);
}
-irqreturn_t uml_net_interrupt(int irq, void *dev_id)
+static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct uml_net_private *lp = dev->priv;
@@ -296,7 +296,7 @@ static struct ethtool_ops uml_net_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-void uml_net_user_timer_expire(unsigned long _conn)
+static void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
struct connection *conn = (struct connection *)_conn;
@@ -786,7 +786,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
}
/* uml_net_init shouldn't be called twice on two CPUs at the same time */
-struct notifier_block uml_inetaddr_notifier = {
+static struct notifier_block uml_inetaddr_notifier = {
.notifier_call = uml_inetaddr_event,
};
diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c
index addd75902656..d269ca387f10 100644
--- a/arch/um/drivers/port_user.c
+++ b/arch/um/drivers/port_user.c
@@ -153,7 +153,7 @@ struct port_pre_exec_data {
int pipe_fd;
};
-void port_pre_exec(void *arg)
+static void port_pre_exec(void *arg)
{
struct port_pre_exec_data *data = arg;
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 6b4a0f9e38de..d19faec7046e 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -13,7 +13,7 @@ struct slip_init {
char *gate_addr;
};
-void slip_init(struct net_device *dev, void *data)
+static void slip_init(struct net_device *dev, void *data)
{
struct uml_net_private *private;
struct slip_data *spri;
@@ -57,7 +57,7 @@ static int slip_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
(struct slip_data *) &lp->user);
}
-const struct net_kern_info slip_kern_info = {
+static const struct net_kern_info slip_kern_info = {
.init = slip_init,
.protocol = slip_protocol,
.read = slip_read,
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index cec0c33cdd39..49266f6108c4 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -34,7 +34,7 @@
static struct tty_driver *console_driver;
-void stdio_announce(char *dev_name, int dev)
+static void stdio_announce(char *dev_name, int dev)
{
printk(KERN_INFO "Virtual console %d assigned device '%s'\n", dev,
dev_name);
@@ -158,7 +158,7 @@ static struct console stdiocons = {
.index = -1,
};
-int stdio_init(void)
+static int stdio_init(void)
{
char *new_title;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index be3a2797dac4..5e45e39a8a8d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -72,18 +72,6 @@ struct io_thread_req {
int error;
};
-extern int open_ubd_file(char *file, struct openflags *openflags, int shared,
- char **backing_file_out, int *bitmap_offset_out,
- unsigned long *bitmap_len_out, int *data_offset_out,
- int *create_cow_out);
-extern int create_cow_file(char *cow_file, char *backing_file,
- struct openflags flags, int sectorsize,
- int alignment, int *bitmap_offset_out,
- unsigned long *bitmap_len_out,
- int *data_offset_out);
-extern int read_cow_bitmap(int fd, void *buf, int offset, int len);
-extern void do_io(struct io_thread_req *req);
-
static inline int ubd_test_bit(__u64 bit, unsigned char *data)
{
__u64 n;
@@ -200,7 +188,7 @@ struct ubd {
}
/* Protected by ubd_lock */
-struct ubd ubd_devs[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_UBD };
+static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };
/* Only changed by fake_ide_setup which is a setup */
static int fake_ide = 0;
@@ -463,7 +451,7 @@ __uml_help(udb_setup,
static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
-int thread_fd = -1;
+static int thread_fd = -1;
static void ubd_end_request(struct request *req, int bytes, int error)
{
@@ -531,7 +519,7 @@ static irqreturn_t ubd_intr(int irq, void *dev)
/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
-void kill_io_thread(void)
+static void kill_io_thread(void)
{
if(io_pid != -1)
os_kill_process(io_pid, 1);
@@ -547,6 +535,192 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
return os_file_size(file, size_out);
}
+static int read_cow_bitmap(int fd, void *buf, int offset, int len)
+{
+ int err;
+
+ err = os_seek_file(fd, offset);
+ if (err < 0)
+ return err;
+
+ err = os_read_file(fd, buf, len);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
+{
+ unsigned long modtime;
+ unsigned long long actual;
+ int err;
+
+ err = os_file_modtime(file, &modtime);
+ if (err < 0) {
+ printk(KERN_ERR "Failed to get modification time of backing "
+ "file \"%s\", err = %d\n", file, -err);
+ return err;
+ }
+
+ err = os_file_size(file, &actual);
+ if (err < 0) {
+ printk(KERN_ERR "Failed to get size of backing file \"%s\", "
+ "err = %d\n", file, -err);
+ return err;
+ }
+
+ if (actual != size) {
+ /*__u64 can be a long on AMD64 and with %lu GCC complains; so
+ * the typecast.*/
+ printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
+ "vs backing file\n", (unsigned long long) size, actual);
+ return -EINVAL;
+ }
+ if (modtime != mtime) {
+ printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs "
+ "backing file\n", mtime, modtime);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
+{
+ struct uml_stat buf1, buf2;
+ int err;
+
+ if (from_cmdline == NULL)
+ return 0;
+ if (!strcmp(from_cmdline, from_cow))
+ return 0;
+
+ err = os_stat_file(from_cmdline, &buf1);
+ if (err < 0) {
+ printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
+ -err);
+ return 0;
+ }
+ err = os_stat_file(from_cow, &buf2);
+ if (err < 0) {
+ printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
+ -err);
+ return 1;
+ }
+ if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
+ return 0;
+
+ printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
+ "\"%s\" specified in COW header of \"%s\"\n",
+ from_cmdline, from_cow, cow);
+ return 1;
+}
+
+static int open_ubd_file(char *file, struct openflags *openflags, int shared,
+ char **backing_file_out, int *bitmap_offset_out,
+ unsigned long *bitmap_len_out, int *data_offset_out,
+ int *create_cow_out)
+{
+ time_t mtime;
+ unsigned long long size;
+ __u32 version, align;
+ char *backing_file;
+ int fd, err, sectorsize, asked_switch, mode = 0644;
+
+ fd = os_open_file(file, *openflags, mode);
+ if (fd < 0) {
+ if ((fd == -ENOENT) && (create_cow_out != NULL))
+ *create_cow_out = 1;
+ if (!openflags->w ||
+ ((fd != -EROFS) && (fd != -EACCES)))
+ return fd;
+ openflags->w = 0;
+ fd = os_open_file(file, *openflags, mode);
+ if (fd < 0)
+ return fd;
+ }
+
+ if (shared)
+ printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
+ else {
+ err = os_lock_file(fd, openflags->w);
+ if (err < 0) {
+ printk(KERN_ERR "Failed to lock '%s', err = %d\n",
+ file, -err);
+ goto out_close;
+ }
+ }
+
+ /* Successful return case! */
+ if (backing_file_out == NULL)
+ return fd;
+
+ err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
+ &size, &sectorsize, &align, bitmap_offset_out);
+ if (err && (*backing_file_out != NULL)) {
+ printk(KERN_ERR "Failed to read COW header from COW file "
+ "\"%s\", errno = %d\n", file, -err);
+ goto out_close;
+ }
+ if (err)
+ return fd;
+
+ asked_switch = path_requires_switch(*backing_file_out, backing_file,
+ file);
+
+ /* Allow switching only if no mismatch. */
+ if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
+ mtime)) {
+ printk(KERN_ERR "Switching backing file to '%s'\n",
+ *backing_file_out);
+ err = write_cow_header(file, fd, *backing_file_out,
+ sectorsize, align, &size);
+ if (err) {
+ printk(KERN_ERR "Switch failed, errno = %d\n", -err);
+ goto out_close;
+ }
+ } else {
+ *backing_file_out = backing_file;
+ err = backing_file_mismatch(*backing_file_out, size, mtime);
+ if (err)
+ goto out_close;
+ }
+
+ cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
+ bitmap_len_out, data_offset_out);
+
+ return fd;
+ out_close:
+ os_close_file(fd);
+ return err;
+}
+
+static int create_cow_file(char *cow_file, char *backing_file,
+ struct openflags flags,
+ int sectorsize, int alignment, int *bitmap_offset_out,
+ unsigned long *bitmap_len_out, int *data_offset_out)
+{
+ int err, fd;
+
+ flags.c = 1;
+ fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
+ if (fd < 0) {
+ err = fd;
+ printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
+ cow_file, -err);
+ goto out;
+ }
+
+ err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
+ bitmap_offset_out, bitmap_len_out,
+ data_offset_out);
+ if (!err)
+ return fd;
+ os_close_file(fd);
+ out:
+ return err;
+}
+
static void ubd_close_dev(struct ubd *ubd_dev)
{
os_close_file(ubd_dev->fd);
@@ -1166,185 +1340,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
return -EINVAL;
}
-static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
-{
- struct uml_stat buf1, buf2;
- int err;
-
- if(from_cmdline == NULL)
- return 0;
- if(!strcmp(from_cmdline, from_cow))
- return 0;
-
- err = os_stat_file(from_cmdline, &buf1);
- if(err < 0){
- printk("Couldn't stat '%s', err = %d\n", from_cmdline, -err);
- return 0;
- }
- err = os_stat_file(from_cow, &buf2);
- if(err < 0){
- printk("Couldn't stat '%s', err = %d\n", from_cow, -err);
- return 1;
- }
- if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
- return 0;
-
- printk("Backing file mismatch - \"%s\" requested,\n"
- "\"%s\" specified in COW header of \"%s\"\n",
- from_cmdline, from_cow, cow);
- return 1;
-}
-
-static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
-{
- unsigned long modtime;
- unsigned long long actual;
- int err;
-
- err = os_file_modtime(file, &modtime);
- if(err < 0){
- printk("Failed to get modification time of backing file "
- "\"%s\", err = %d\n", file, -err);
- return err;
- }
-
- err = os_file_size(file, &actual);
- if(err < 0){
- printk("Failed to get size of backing file \"%s\", "
- "err = %d\n", file, -err);
- return err;
- }
-
- if(actual != size){
- /*__u64 can be a long on AMD64 and with %lu GCC complains; so
- * the typecast.*/
- printk("Size mismatch (%llu vs %llu) of COW header vs backing "
- "file\n", (unsigned long long) size, actual);
- return -EINVAL;
- }
- if(modtime != mtime){
- printk("mtime mismatch (%ld vs %ld) of COW header vs backing "
- "file\n", mtime, modtime);
- return -EINVAL;
- }
- return 0;
-}
-
-int read_cow_bitmap(int fd, void *buf, int offset, int len)
-{
- int err;
-
- err = os_seek_file(fd, offset);
- if(err < 0)
- return err;
-
- err = os_read_file(fd, buf, len);
- if(err < 0)
- return err;
-
- return 0;
-}
-
-int open_ubd_file(char *file, struct openflags *openflags, int shared,
- char **backing_file_out, int *bitmap_offset_out,
- unsigned long *bitmap_len_out, int *data_offset_out,
- int *create_cow_out)
-{
- time_t mtime;
- unsigned long long size;
- __u32 version, align;
- char *backing_file;
- int fd, err, sectorsize, asked_switch, mode = 0644;
-
- fd = os_open_file(file, *openflags, mode);
- if (fd < 0) {
- if ((fd == -ENOENT) && (create_cow_out != NULL))
- *create_cow_out = 1;
- if (!openflags->w ||
- ((fd != -EROFS) && (fd != -EACCES)))
- return fd;
- openflags->w = 0;
- fd = os_open_file(file, *openflags, mode);
- if (fd < 0)
- return fd;
- }
-
- if(shared)
- printk("Not locking \"%s\" on the host\n", file);
- else {
- err = os_lock_file(fd, openflags->w);
- if(err < 0){
- printk("Failed to lock '%s', err = %d\n", file, -err);
- goto out_close;
- }
- }
-
- /* Successful return case! */
- if(backing_file_out == NULL)
- return fd;
-
- err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
- &size, &sectorsize, &align, bitmap_offset_out);
- if(err && (*backing_file_out != NULL)){
- printk("Failed to read COW header from COW file \"%s\", "
- "errno = %d\n", file, -err);
- goto out_close;
- }
- if(err)
- return fd;
-
- asked_switch = path_requires_switch(*backing_file_out, backing_file, file);
-
- /* Allow switching only if no mismatch. */
- if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) {
- printk("Switching backing file to '%s'\n", *backing_file_out);
- err = write_cow_header(file, fd, *backing_file_out,
- sectorsize, align, &size);
- if (err) {
- printk("Switch failed, errno = %d\n", -err);
- goto out_close;
- }
- } else {
- *backing_file_out = backing_file;
- err = backing_file_mismatch(*backing_file_out, size, mtime);
- if (err)
- goto out_close;
- }
-
- cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
- bitmap_len_out, data_offset_out);
-
- return fd;
- out_close:
- os_close_file(fd);
- return err;
-}
-
-int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
- int sectorsize, int alignment, int *bitmap_offset_out,
- unsigned long *bitmap_len_out, int *data_offset_out)
-{
- int err, fd;
-
- flags.c = 1;
- fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
- if(fd < 0){
- err = fd;
- printk("Open of COW file '%s' failed, errno = %d\n", cow_file,
- -err);
- goto out;
- }
-
- err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
- bitmap_offset_out, bitmap_len_out,
- data_offset_out);
- if(!err)
- return fd;
- os_close_file(fd);
- out:
- return err;
-}
-
static int update_bitmap(struct io_thread_req *req)
{
int n;
@@ -1369,7 +1364,7 @@ static int update_bitmap(struct io_thread_req *req)
return 0;
}
-void do_io(struct io_thread_req *req)
+static void do_io(struct io_thread_req *req)
{
char *buf;
unsigned long len;
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index 624b5100a3cd..1e651457e049 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -31,7 +31,6 @@ extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq);
extern int parse_chan_pair(char *str, struct line *line, int device,
const struct chan_opts *opts, char **error_out);
-extern int open_chan(struct list_head *chans);
extern int write_chan(struct list_head *chans, const char *buf, int len,
int write_irq);
extern int console_write_chan(struct list_head *chans, const char *buf,
@@ -45,7 +44,6 @@ extern void close_chan(struct list_head *chans, int delay_free_irq);
extern int chan_window_size(struct list_head *chans,
unsigned short *rows_out,
unsigned short *cols_out);
-extern int chan_out_fd(struct list_head *chans);
extern int chan_config_string(struct list_head *chans, char *str, int size,
char **error_out);
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index a6c1dd1cf5a1..56deed623446 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -115,7 +115,7 @@ static int have_root __initdata = 0;
/* Set in uml_mem_setup and modified in linux_main */
long long physmem_size = 32 * 1024 * 1024;
-static char *usage_string =
+static const char *usage_string =
"User Mode Linux v%s\n"
" available at http://user-mode-linux.sourceforge.net/\n\n";
@@ -202,7 +202,7 @@ static void __init uml_checksetup(char *line, int *add)
p = &__uml_setup_start;
while (p < &__uml_setup_end) {
- int n;
+ size_t n;
n = strlen(p->str);
if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
@@ -258,7 +258,8 @@ int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
unsigned long virtmem_size, max_physmem;
- unsigned int i, add;
+ unsigned int i;
+ int add;
char * mode;
for (i = 1; i < argc; i++) {
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index b616e15638fb..997d01944f91 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -25,15 +25,15 @@
#include "registers.h"
#include "skas_ptrace.h"
-static int ptrace_child(void)
+static void ptrace_child(void)
{
int ret;
/* Calling os_getpid because some libcs cached getpid incorrectly */
int pid = os_getpid(), ppid = getppid();
int sc_result;
- change_sig(SIGWINCH, 0);
- if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
+ if (change_sig(SIGWINCH, 0) < 0 ||
+ ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
perror("ptrace");
kill(pid, SIGKILL);
}
@@ -75,9 +75,8 @@ static void fatal(char *fmt, ...)
va_list list;
va_start(list, fmt);
- vprintf(fmt, list);
+ vfprintf(stderr, fmt, list);
va_end(list);
- fflush(stdout);
exit(1);
}
@@ -87,9 +86,8 @@ static void non_fatal(char *fmt, ...)
va_list list;
va_start(list, fmt);
- vprintf(fmt, list);
+ vfprintf(stderr, fmt, list);
va_end(list);
- fflush(stdout);
}
static int start_ptraced_child(void)
@@ -495,7 +493,7 @@ int __init parse_iomem(char *str, int *add)
driver = str;
file = strchr(str,',');
if (file == NULL) {
- printf("parse_iomem : failed to parse iomem\n");
+ fprintf(stderr, "parse_iomem : failed to parse iomem\n");
goto out;
}
*file = '\0';
diff --git a/arch/um/os-Linux/sys-i386/task_size.c b/arch/um/os-Linux/sys-i386/task_size.c
index 48d211b3d9a1..ccb49b0aff59 100644
--- a/arch/um/os-Linux/sys-i386/task_size.c
+++ b/arch/um/os-Linux/sys-i386/task_size.c
@@ -88,7 +88,10 @@ unsigned long os_get_task_size(void)
sa.sa_handler = segfault;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_NODEFER;
- sigaction(SIGSEGV, &sa, &old);
+ if (sigaction(SIGSEGV, &sa, &old)) {
+ perror("os_get_task_size");
+ exit(1);
+ }
if (!page_ok(bottom)) {
fprintf(stderr, "Address 0x%x no good?\n",
@@ -110,11 +113,12 @@ unsigned long os_get_task_size(void)
out:
/* Restore the old SIGSEGV handling */
- sigaction(SIGSEGV, &old, NULL);
-
+ if (sigaction(SIGSEGV, &old, NULL)) {
+ perror("os_get_task_size");
+ exit(1);
+ }
top <<= UM_KERN_PAGE_SHIFT;
printf("0x%x\n", top);
- fflush(stdout);
return top;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2fadf794483d..a12dbb2b93f3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -86,9 +86,6 @@ config GENERIC_GPIO
config ARCH_MAY_HAVE_PC_FDC
def_bool y
-config DMI
- def_bool y
-
config RWSEM_GENERIC_SPINLOCK
def_bool !X86_XADD
@@ -114,6 +111,9 @@ config GENERIC_TIME_VSYSCALL
config ARCH_HAS_CPU_RELAX
def_bool y
+config ARCH_HAS_CACHE_LINE_SIZE
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
@@ -373,6 +373,25 @@ config VMI
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
+config KVM_CLOCK
+ bool "KVM paravirtualized clock"
+ select PARAVIRT
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ Turning on this option will allow you to run a paravirtualized clock
+ when running over the KVM hypervisor. Instead of relying on a PIT
+	  (or other device) emulated by the underlying device model, the host
+	  provides the guest with timing infrastructure such as time of day
+	  and system time.
+
+config KVM_GUEST
+ bool "KVM Guest support"
+ select PARAVIRT
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ This option enables various optimizations for running under the KVM
+ hypervisor.
+
source "arch/x86/lguest/Kconfig"
config PARAVIRT
@@ -463,6 +482,15 @@ config HPET_EMULATE_RTC
# Mark as embedded because too many people got it wrong.
# The code disables itself when not needed.
+config DMI
+ default y
+ bool "Enable DMI scanning" if EMBEDDED
+ help
+	  Enable scanning of DMI to identify machine quirks. Say Y
+ here unless you have verified that your setup is not
+ affected by entries in the DMI blacklist. Required by PNP
+ BIOS code.
+
config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90e092d0af0c..fa19c3819540 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
+obj-$(CONFIG_KVM_GUEST) += kvm.o
+obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
ifdef CONFIG_INPUT_PCSPKR
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f0030a0999c7..e4ea362e8480 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -904,6 +904,7 @@ recalc:
original_pm_idle();
else
default_idle();
+ local_irq_disable();
jiffies_since_last_check = jiffies - last_jiffies;
if (jiffies_since_last_check > idle_period)
goto recalc;
@@ -911,6 +912,8 @@ recalc:
if (apm_idle_done)
apm_do_busy();
+
+ local_irq_enable();
}
/**
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 2251d0ae9570..268553817909 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -25,6 +25,7 @@
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
+#include <asm/reboot.h>
#include <mach_ipi.h>
@@ -117,7 +118,7 @@ static void nmi_shootdown_cpus(void)
}
#endif
-void machine_crash_shutdown(struct pt_regs *regs)
+void native_machine_crash_shutdown(struct pt_regs *regs)
{
/* This function is only called after the system
* has panicked or is otherwise in a critical state.
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
new file mode 100644
index 000000000000..8b7a3cf37d2b
--- /dev/null
+++ b/arch/x86/kernel/kvm.c
@@ -0,0 +1,248 @@
+/*
+ * KVM paravirt_ops implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright IBM Corporation, 2007
+ * Authors: Anthony Liguori <aliguori@us.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+
+#define MMU_QUEUE_SIZE 1024
+
+struct kvm_para_state {
+ u8 mmu_queue[MMU_QUEUE_SIZE];
+ int mmu_queue_len;
+ enum paravirt_lazy_mode mode;
+};
+
+static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+
+static struct kvm_para_state *kvm_para_state(void)
+{
+ return &per_cpu(para_state, raw_smp_processor_id());
+}
+
+/*
+ * No need for any "IO delay" on KVM
+ */
+static void kvm_io_delay(void)
+{
+}
+
+static void kvm_mmu_op(void *buffer, unsigned len)
+{
+ int r;
+ unsigned long a1, a2;
+
+ do {
+ a1 = __pa(buffer);
+ a2 = 0; /* on i386 __pa() always returns <4G */
+ r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
+ buffer += r;
+ len -= r;
+ } while (len);
+}
+
+static void mmu_queue_flush(struct kvm_para_state *state)
+{
+ if (state->mmu_queue_len) {
+ kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
+ state->mmu_queue_len = 0;
+ }
+}
+
+static void kvm_deferred_mmu_op(void *buffer, int len)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ if (state->mode != PARAVIRT_LAZY_MMU) {
+ kvm_mmu_op(buffer, len);
+ return;
+ }
+ if (state->mmu_queue_len + len > sizeof state->mmu_queue)
+ mmu_queue_flush(state);
+ memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
+ state->mmu_queue_len += len;
+}
+
+static void kvm_mmu_write(void *dest, u64 val)
+{
+ __u64 pte_phys;
+ struct kvm_mmu_op_write_pte wpte;
+
+#ifdef CONFIG_HIGHPTE
+ struct page *page;
+ unsigned long dst = (unsigned long) dest;
+
+ page = kmap_atomic_to_page(dest);
+ pte_phys = page_to_pfn(page);
+ pte_phys <<= PAGE_SHIFT;
+ pte_phys += (dst & ~(PAGE_MASK));
+#else
+ pte_phys = (unsigned long)__pa(dest);
+#endif
+ wpte.header.op = KVM_MMU_OP_WRITE_PTE;
+ wpte.pte_val = val;
+ wpte.pte_phys = pte_phys;
+
+ kvm_deferred_mmu_op(&wpte, sizeof wpte);
+}
+
+/*
+ * We only need to hook operations that are MMU writes. We hook these so that
+ * we can use lazy MMU mode to batch these operations. We could probably
+ * improve the performance of the host code if we used some of the information
+ * here to simplify processing of batched writes.
+ */
+static void kvm_set_pte(pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ kvm_mmu_write(pmdp, pmd_val(pmd));
+}
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ kvm_mmu_write(ptep, 0);
+}
+
+static void kvm_pmd_clear(pmd_t *pmdp)
+{
+ kvm_mmu_write(pmdp, 0);
+}
+#endif
+
+static void kvm_set_pud(pud_t *pudp, pud_t pud)
+{
+ kvm_mmu_write(pudp, pud_val(pud));
+}
+
+#if PAGETABLE_LEVELS == 4
+static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ kvm_mmu_write(pgdp, pgd_val(pgd));
+}
+#endif
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+static void kvm_flush_tlb(void)
+{
+ struct kvm_mmu_op_flush_tlb ftlb = {
+ .header.op = KVM_MMU_OP_FLUSH_TLB,
+ };
+
+ kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
+}
+
+static void kvm_release_pt(u32 pfn)
+{
+ struct kvm_mmu_op_release_pt rpt = {
+ .header.op = KVM_MMU_OP_RELEASE_PT,
+ .pt_phys = (u64)pfn << PAGE_SHIFT,
+ };
+
+ kvm_mmu_op(&rpt, sizeof rpt);
+}
+
+static void kvm_enter_lazy_mmu(void)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ paravirt_enter_lazy_mmu();
+ state->mode = paravirt_get_lazy_mode();
+}
+
+static void kvm_leave_lazy_mmu(void)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ mmu_queue_flush(state);
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ state->mode = paravirt_get_lazy_mode();
+}
+
+static void paravirt_ops_setup(void)
+{
+ pv_info.name = "KVM";
+ pv_info.paravirt_enabled = 1;
+
+ if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ pv_cpu_ops.io_delay = kvm_io_delay;
+
+ if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
+ pv_mmu_ops.set_pte = kvm_set_pte;
+ pv_mmu_ops.set_pte_at = kvm_set_pte_at;
+ pv_mmu_ops.set_pmd = kvm_set_pmd;
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+ pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
+ pv_mmu_ops.set_pte_present = kvm_set_pte_present;
+ pv_mmu_ops.pte_clear = kvm_pte_clear;
+ pv_mmu_ops.pmd_clear = kvm_pmd_clear;
+#endif
+ pv_mmu_ops.set_pud = kvm_set_pud;
+#if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = kvm_set_pgd;
+#endif
+#endif
+ pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+ pv_mmu_ops.release_pte = kvm_release_pt;
+ pv_mmu_ops.release_pmd = kvm_release_pt;
+ pv_mmu_ops.release_pud = kvm_release_pt;
+
+ pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
+ }
+}
+
+void __init kvm_guest_init(void)
+{
+ if (!kvm_para_available())
+ return;
+
+ paravirt_ops_setup();
+}
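[Editor's note] The lazy-mode hooks matter because generic mm code brackets batched page-table updates with arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode(); between the two, each hooked write only appends a KVM_MMU_OP_WRITE_PTE record to the per-cpu queue, and the flush in kvm_leave_lazy_mmu() turns the whole run into a single hypercall. A sketch of the pattern, not taken from the patch:

	static void batched_pte_writes(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pte_t first, pte_t second)
	{
		arch_enter_lazy_mmu_mode();	/* kvm_enter_lazy_mmu() */
		set_pte_at(mm, addr, ptep, first);	/* queued, no hypercall */
		set_pte_at(mm, addr + PAGE_SIZE, ptep + 1, second); /* queued */
		arch_leave_lazy_mmu_mode();	/* one KVM_HC_MMU_OP hypercall */
	}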
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
new file mode 100644
index 000000000000..ddee04043aeb
--- /dev/null
+++ b/arch/x86/kernel/kvmclock.c
@@ -0,0 +1,187 @@
+/* KVM paravirtual clock driver. A clocksource implementation
+ Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <linux/clocksource.h>
+#include <linux/kvm_para.h>
+#include <asm/arch_hooks.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <linux/percpu.h>
+#include <asm/reboot.h>
+
+#define KVM_SCALE 22
+
+static int kvmclock = 1;
+
+static int parse_no_kvmclock(char *arg)
+{
+ kvmclock = 0;
+ return 0;
+}
+early_param("no-kvmclock", parse_no_kvmclock);
+
+/* The hypervisor will put information about time periodically here */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
+#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
+
+static inline u64 kvm_get_delta(u64 last_tsc)
+{
+ int cpu = smp_processor_id();
+ u64 delta = native_read_tsc() - last_tsc;
+ return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
+}
+
+static struct kvm_wall_clock wall_clock;
+static cycle_t kvm_clock_read(void);
+/*
+ * The wallclock is the time of day when we booted. Some time may have
+ * elapsed since the hypervisor wrote the data, so we account for that
+ * with the system time.
+ */
+unsigned long kvm_get_wallclock(void)
+{
+ u32 wc_sec, wc_nsec;
+ u64 delta;
+ struct timespec ts;
+ int version, nsec;
+ int low, high;
+
+ low = (int)__pa(&wall_clock);
+ high = ((u64)__pa(&wall_clock) >> 32);
+
+ delta = kvm_clock_read();
+
+ native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
+ do {
+ version = wall_clock.wc_version;
+ rmb();
+ wc_sec = wall_clock.wc_sec;
+ wc_nsec = wall_clock.wc_nsec;
+ rmb();
+ } while ((wall_clock.wc_version != version) || (version & 1));
+
+ delta = kvm_clock_read() - delta;
+ delta += wc_nsec;
+ nsec = do_div(delta, NSEC_PER_SEC);
+ set_normalized_timespec(&ts, wc_sec + delta, nsec);
+ /*
+ * Of all mechanisms of time adjustment I've tested, this one
+ * was the champion!
+ */
+ return ts.tv_sec + 1;
+}
+
+int kvm_set_wallclock(unsigned long now)
+{
+ return 0;
+}
+
+/*
+ * This is our read_clock function. The host puts a tsc timestamp with each
+ * time update. Without the tsc adjustment, we could have a situation in
+ * which a vcpu starts to run earlier (smaller system_time), but probes
+ * time later (compared to another vcpu), leading to time going backwards.
+ */
+static cycle_t kvm_clock_read(void)
+{
+ u64 last_tsc, now;
+ int cpu;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ last_tsc = get_clock(cpu, tsc_timestamp);
+ now = get_clock(cpu, system_time);
+
+ now += kvm_get_delta(last_tsc);
+ preempt_enable();
+
+ return now;
+}
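+
+/*
+ * kvm_clock_read() already returns nanoseconds, so the mult/shift pair
+ * below is effectively an identity conversion:
+ * (x << KVM_SCALE) >> KVM_SCALE == x.
+ */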
+static struct clocksource kvm_clock = {
+ .name = "kvm-clock",
+ .read = kvm_clock_read,
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 1 << KVM_SCALE,
+ .shift = KVM_SCALE,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int kvm_register_clock(void)
+{
+ int cpu = smp_processor_id();
+ int low, high;
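+ /* the low bit of the address we write is the clock's enable bit */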
+ low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
+ high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+
+ return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
+}
+
+static void kvm_setup_secondary_clock(void)
+{
+ /*
+ * Now that the first cpu already had this clocksource initialized,
+ * we shouldn't fail.
+ */
+ WARN_ON(kvm_register_clock());
+ /* ok, done with our trickery, call native */
+ setup_secondary_APIC_clock();
+}
+
+/*
+ * After the clock is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shut down, this
+ * memory won't be valid. In cases like kexec, in which you install a new
+ * kernel, this means a random memory location will keep being written.
+ * So before any kind of shutdown from our side, we unregister the clock
+ * by writing anything that does not have the 'enable' bit set in the msr.
+ */
+#ifdef CONFIG_KEXEC
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_machine_crash_shutdown(regs);
+}
+#endif
+
+static void kvm_shutdown(void)
+{
+ native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_machine_shutdown();
+}
+
+void __init kvmclock_init(void)
+{
+ if (!kvm_para_available())
+ return;
+
+ if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ if (kvm_register_clock())
+ return;
+ pv_time_ops.get_wallclock = kvm_get_wallclock;
+ pv_time_ops.set_wallclock = kvm_set_wallclock;
+ pv_time_ops.sched_clock = kvm_clock_read;
+ pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+ machine_ops.shutdown = kvm_shutdown;
+#ifdef CONFIG_KEXEC
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+ clocksource_register(&kvm_clock);
+ }
+}
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index cfc2648d25ff..3cad17fe026b 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -63,7 +63,7 @@ static int __init mfgpt_fix(char *s)
/* The following undocumented bit resets the MFGPT timers */
val = 0xFF; dummy = 0;
- wrmsr(0x5140002B, val, dummy);
+ wrmsr(MSR_MFGPT_SETUP, val, dummy);
return 1;
}
__setup("mfgptfix", mfgpt_fix);
@@ -127,17 +127,17 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
* 6; that is, resets for 7 and 8 will be ignored. Is this
* a problem? -dilinger
*/
- msr = MFGPT_NR_MSR;
+ msr = MSR_MFGPT_NR;
mask = 1 << (timer + 24);
break;
case MFGPT_EVENT_NMI:
- msr = MFGPT_NR_MSR;
+ msr = MSR_MFGPT_NR;
mask = 1 << (timer + shift);
break;
case MFGPT_EVENT_IRQ:
- msr = MFGPT_IRQ_MSR;
+ msr = MSR_MFGPT_IRQ;
mask = 1 << (timer + shift);
break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3004d716539d..67e9b4a1e89d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -4,6 +4,8 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/pm.h>
struct kmem_cache *task_xstate_cachep;
@@ -42,3 +44,118 @@ void arch_task_cache_init(void)
__alignof__(union thread_xstate),
SLAB_PANIC, NULL);
}
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
+ * pm_idle and pick up the new pm_idle value. Required while changing the
+ * pm_idle handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. The old
+ * pm_idle value will not be used by any CPU after this function returns.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 0, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/*
+ * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate the IPI used to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter an optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we are woken up
+ * from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+ if (!need_resched()) {
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(ax, cx);
+ }
+}
+
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+ if (!need_resched()) {
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __sti_mwait(0, 0);
+ else
+ local_irq_enable();
+ } else
+ local_irq_enable();
+}
+
+
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+ if (force_mwait)
+ return 1;
+ /* Any C1 states supported? */
+ return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+ local_irq_enable();
+ cpu_relax();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
+ static int selected;
+
+ if (selected)
+ return;
+#ifdef CONFIG_X86_SMP
+ if (pm_idle == poll_idle && smp_num_siblings > 1) {
+ printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+ " performance may degrade.\n");
+ }
+#endif
+ if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+ /*
+ * Skip if setup has overridden idle.
+ * One CPU supports mwait => all CPUs support mwait.
+ */
+ if (!pm_idle) {
+ printk(KERN_INFO "using mwait in idle threads.\n");
+ pm_idle = mwait_idle;
+ }
+ }
+ selected = 1;
+}
+
+static int __init idle_setup(char *str)
+{
+ if (!strcmp(str, "poll")) {
+ printk("using polling idle threads.\n");
+ pm_idle = poll_idle;
+ } else if (!strcmp(str, "mwait"))
+ force_mwait = 1;
+ else
+ return -1;
+
+ boot_option_idle_override = 1;
+ return 0;
+}
+early_param("idle", idle_setup);
+
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 77de848bd1fb..f8476dfbb60d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -111,12 +111,10 @@ void default_idle(void)
*/
smp_mb();
- local_irq_disable();
- if (!need_resched()) {
+ if (!need_resched())
safe_halt(); /* enables interrupts racelessly */
- local_irq_disable();
- }
- local_irq_enable();
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
} else {
local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
EXPORT_SYMBOL(default_idle);
#endif
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- local_irq_enable();
- cpu_relax();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
if (cpu_is_offline(cpu))
play_dead();
+ local_irq_disable();
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
@@ -206,104 +194,6 @@ void cpu_idle(void)
}
}
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
- smp_mb();
- /* kick all the CPUs so that they exit out of pm_idle */
- smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(ax, cx);
- else
- local_irq_enable();
- } else
- local_irq_enable();
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- local_irq_enable();
- mwait_idle_with_hints(0, 0);
-}
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
- if (force_mwait)
- return 1;
- /* Any C1 states supported? */
- return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
- static int selected;
-
- if (selected)
- return;
-#ifdef CONFIG_X86_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
- " performance may degrade.\n");
- }
-#endif
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk(KERN_INFO "using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
- selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
- if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strcmp(str, "mwait"))
- force_mwait = 1;
- else
- return -1;
-
- boot_option_idle_override = 1;
- return 0;
-}
-early_param("idle", idle_setup);
-
void __show_registers(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 131c2ee7ac56..e2319f39988b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,26 +106,13 @@ void default_idle(void)
* test NEED_RESCHED:
*/
smp_mb();
- local_irq_disable();
- if (!need_resched()) {
+ if (!need_resched())
safe_halt(); /* enables interrupts racelessly */
- local_irq_disable();
- }
- local_irq_enable();
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- local_irq_enable();
- cpu_relax();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);
@@ -192,110 +179,6 @@ void cpu_idle(void)
}
}
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
- smp_mb();
- /* kick all the CPUs so that they exit out of pm_idle */
- smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(0, 0);
- else
- local_irq_enable();
- } else {
- local_irq_enable();
- }
-}
-
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
- if (force_mwait)
- return 1;
- /* Any C1 states supported? */
- return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
- static int selected;
-
- if (selected)
- return;
-#ifdef CONFIG_X86_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
- " performance may degrade.\n");
- }
-#endif
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk(KERN_INFO "using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
- selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
- if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strcmp(str, "mwait"))
- force_mwait = 1;
- else
- return -1;
-
- boot_option_idle_override = 1;
- return 0;
-}
-early_param("idle", idle_setup);
-
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1791a751a772..a4a838306b2c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -399,7 +399,7 @@ static void native_machine_emergency_restart(void)
}
}
-static void native_machine_shutdown(void)
+void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
@@ -470,7 +470,10 @@ struct machine_ops machine_ops = {
.shutdown = native_machine_shutdown,
.emergency_restart = native_machine_emergency_restart,
.restart = native_machine_restart,
- .halt = native_machine_halt
+ .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+ .crash_shutdown = native_machine_crash_shutdown,
+#endif
};
void machine_power_off(void)
@@ -498,3 +501,9 @@ void machine_halt(void)
machine_ops.halt();
}
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ machine_ops.crash_shutdown(regs);
+}
+#endif
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 44cc9b933932..2283422af794 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -47,6 +47,7 @@
#include <linux/pfn.h>
#include <linux/pci.h>
#include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
#include <video/edid.h>
@@ -820,6 +821,10 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = setup_memory();
+#ifdef CONFIG_KVM_CLOCK
+ kvmclock_init();
+#endif
+
#ifdef CONFIG_VMI
/*
* Must be after max_low_pfn is determined, and before kernel
@@ -827,6 +832,7 @@ void __init setup_arch(char **cmdline_p)
*/
vmi_init();
#endif
+ kvm_guest_init();
/*
* NOTE: before this point _nobody_ is allowed to allocate
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 60e64c8eee92..a94fb959a87a 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -42,6 +42,7 @@
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -384,6 +385,10 @@ void __init setup_arch(char **cmdline_p)
io_delay_init();
+#ifdef CONFIG_KVM_CLOCK
+ kvmclock_init();
+#endif
+
#ifdef CONFIG_SMP
/* setup to use the early static init tables during kernel startup */
x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
@@ -488,6 +493,8 @@ void __init setup_arch(char **cmdline_p)
init_apic_mappings();
ioapic_init_mappings();
+ kvm_guest_init();
+
/*
* We trust e820 completely. No explicit ROM probing in memory.
*/
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 41962e793c0f..8d45fabc5f3b 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -19,7 +19,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
---help---
@@ -50,6 +50,17 @@ config KVM_AMD
Provides support for KVM on AMD processors equipped with the AMD-V
(SVM) extensions.
+config KVM_TRACE
+ bool "KVM trace support"
+ depends on KVM && MARKERS && SYSFS
+ select RELAY
+ select DEBUG_FS
+ default n
+ ---help---
+ This option allows reading a trace of kvm-related events through
+ relayfs. Note the ABI is not considered stable and will be
+ modified in future updates.
+
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/lguest/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index ffdd0b310784..c97d35c218db 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,10 +3,14 @@
#
common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+ifeq ($(CONFIG_KVM_TRACE),y)
+common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+endif
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
-kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o
+kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
+ i8254.o
obj-$(CONFIG_KVM) += kvm.o
kvm-intel-objs = vmx.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
new file mode 100644
index 000000000000..361e31611276
--- /dev/null
+++ b/arch/x86/kvm/i8254.c
@@ -0,0 +1,611 @@
+/*
+ * 8253/8254 interval timer emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2006 Intel Corporation
+ * Copyright (c) 2007 Keir Fraser, XenSource Inc
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Authors:
+ * Sheng Yang <sheng.yang@intel.com>
+ * Based on QEMU and Xen.
+ */
+
+#include <linux/kvm_host.h>
+
+#include "irq.h"
+#include "i8254.h"
+
+#ifndef CONFIG_X86_64
+#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#else
+#define mod_64(x, y) ((x) % (y))
+#endif
+
+#define RW_STATE_LSB 1
+#define RW_STATE_MSB 2
+#define RW_STATE_WORD0 3
+#define RW_STATE_WORD1 4
+
+/* Compute with 96 bit intermediate result: (a*b)/c */
+static u64 muldiv64(u64 a, u32 b, u32 c)
+{
+ union {
+ u64 ll;
+ struct {
+ u32 low, high;
+ } l;
+ } u, res;
+ u64 rl, rh;
+
+ u.ll = a;
+ rl = (u64)u.l.low * (u64)b;
+ rh = (u64)u.l.high * (u64)b;
+ rh += (rl >> 32);
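+ /* (rh << 32) + (rl & 0xffffffff) now holds the 96-bit product a*b */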
+ res.l.high = div64_64(rh, c);
+ res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+ return res.ll;
+}
+
+static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ switch (c->mode) {
+ default:
+ case 0:
+ case 4:
+ /* XXX: just disable/enable counting */
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 5:
+ /* Restart counting on rising edge. */
+ if (c->gate < val)
+ c->count_load_time = ktime_get();
+ break;
+ }
+
+ c->gate = val;
+}
+
+int pit_get_gate(struct kvm *kvm, int channel)
+{
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ return kvm->arch.vpit->pit_state.channels[channel].gate;
+}
+
+static int pit_get_count(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+ s64 d, t;
+ int counter;
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+ d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
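+ /* d = number of PIT ticks elapsed since the count was loaded */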
+
+ switch (c->mode) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ counter = (c->count - d) & 0xffff;
+ break;
+ case 3:
+ /* XXX: may be incorrect for odd counts */
+ counter = c->count - (mod_64((2 * d), c->count));
+ break;
+ default:
+ counter = c->count - mod_64(d, c->count);
+ break;
+ }
+ return counter;
+}
+
+static int pit_get_out(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+ s64 d, t;
+ int out;
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+ d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+
+ switch (c->mode) {
+ default:
+ case 0:
+ out = (d >= c->count);
+ break;
+ case 1:
+ out = (d < c->count);
+ break;
+ case 2:
+ out = ((mod_64(d, c->count) == 0) && (d != 0));
+ break;
+ case 3:
+ out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
+ break;
+ case 4:
+ case 5:
+ out = (d == c->count);
+ break;
+ }
+
+ return out;
+}
+
+static void pit_latch_count(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ if (!c->count_latched) {
+ c->latched_count = pit_get_count(kvm, channel);
+ c->count_latched = c->rw_mode;
+ }
+}
+
+static void pit_latch_status(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ if (!c->status_latched) {
+ /* TODO: Return NULL COUNT (bit 6). */
+ c->status = ((pit_get_out(kvm, channel) << 7) |
+ (c->rw_mode << 4) |
+ (c->mode << 1) |
+ c->bcd);
+ c->status_latched = 1;
+ }
+}
+
+int __pit_timer_fn(struct kvm_kpit_state *ps)
+{
+ struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
+ struct kvm_kpit_timer *pt = &ps->pit_timer;
+
+ atomic_inc(&pt->pending);
+ smp_mb__after_atomic_inc();
+ /* FIXME: handle case where the guest is in guest mode */
+ if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+ vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ wake_up_interruptible(&vcpu0->wq);
+ }
+
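+ /* advance the expiry by one period so the timer keeps ticking */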
+ pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
+ pt->scheduled = ktime_to_ns(pt->timer.expires);
+
+ return (pt->period == 0 ? 0 : 1);
+}
+
+int pit_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+
+ if (pit && vcpu->vcpu_id == 0)
+ return atomic_read(&pit->pit_state.pit_timer.pending);
+
+ return 0;
+}
+
+static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+{
+ struct kvm_kpit_state *ps;
+ int restart_timer = 0;
+
+ ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);
+
+ restart_timer = __pit_timer_fn(ps);
+
+ if (restart_timer)
+ return HRTIMER_RESTART;
+ else
+ return HRTIMER_NORESTART;
+}
+
+static void destroy_pit_timer(struct kvm_kpit_timer *pt)
+{
+ pr_debug("pit: execute del timer!\n");
+ hrtimer_cancel(&pt->timer);
+}
+
+static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
+{
+ s64 interval;
+
+ interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+
+ pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+
+ /* TODO: the new value only takes effect after the timer is retriggered */
+ hrtimer_cancel(&pt->timer);
+ pt->period = (is_period == 0) ? 0 : interval;
+ pt->timer.function = pit_timer_fn;
+ atomic_set(&pt->pending, 0);
+
+ hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
+ HRTIMER_MODE_ABS);
+}
+
+static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+
+ WARN_ON(!mutex_is_locked(&ps->lock));
+
+ pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
+
+ /*
+ * Though the spec says the state of the 8254 is undefined after
+ * power-up, some tricky OSes like Windows XP seem to depend on the
+ * IRQ0 interrupt when booting up.
+ * So set an initial rate here, rather than a specific count.
+ */
+ if (val == 0)
+ val = 0x10000;
+
+ ps->channels[channel].count_load_time = ktime_get();
+ ps->channels[channel].count = val;
+
+ if (channel != 0)
+ return;
+
+ /* Two types of timer:
+ * mode 1 is one-shot, mode 2 is periodic; otherwise delete the timer */
+ switch (ps->channels[0].mode) {
+ case 1:
+ create_pit_timer(&ps->pit_timer, val, 0);
+ break;
+ case 2:
+ create_pit_timer(&ps->pit_timer, val, 1);
+ break;
+ default:
+ destroy_pit_timer(&ps->pit_timer);
+ }
+}
+
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ pit_load_count(kvm, channel, val);
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+}
+
+static void pit_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ int channel, access;
+ struct kvm_kpit_channel_state *s;
+ u32 val = *(u32 *) data;
+
+ val &= 0xff;
+ addr &= KVM_PIT_CHANNEL_MASK;
+
+ mutex_lock(&pit_state->lock);
+
+ if (val != 0)
+ pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
+ (unsigned int)addr, len, val);
+
+ if (addr == 3) {
+ channel = val >> 6;
+ if (channel == 3) {
+ /* Read-Back Command. */
+ for (channel = 0; channel < 3; channel++) {
+ s = &pit_state->channels[channel];
+ if (val & (2 << channel)) {
+ if (!(val & 0x20))
+ pit_latch_count(kvm, channel);
+ if (!(val & 0x10))
+ pit_latch_status(kvm, channel);
+ }
+ }
+ } else {
+ /* Select Counter <channel>. */
+ s = &pit_state->channels[channel];
+ access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
+ if (access == 0) {
+ pit_latch_count(kvm, channel);
+ } else {
+ s->rw_mode = access;
+ s->read_state = access;
+ s->write_state = access;
+ s->mode = (val >> 1) & 7;
+ if (s->mode > 5)
+ s->mode -= 4;
+ s->bcd = val & 1;
+ }
+ }
+ } else {
+ /* Write Count. */
+ s = &pit_state->channels[addr];
+ switch (s->write_state) {
+ default:
+ case RW_STATE_LSB:
+ pit_load_count(kvm, addr, val);
+ break;
+ case RW_STATE_MSB:
+ pit_load_count(kvm, addr, val << 8);
+ break;
+ case RW_STATE_WORD0:
+ s->write_latch = val;
+ s->write_state = RW_STATE_WORD1;
+ break;
+ case RW_STATE_WORD1:
+ pit_load_count(kvm, addr, s->write_latch | (val << 8));
+ s->write_state = RW_STATE_WORD0;
+ break;
+ }
+ }
+
+ mutex_unlock(&pit_state->lock);
+}
+
+static void pit_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ int ret, count;
+ struct kvm_kpit_channel_state *s;
+
+ addr &= KVM_PIT_CHANNEL_MASK;
+ s = &pit_state->channels[addr];
+
+ mutex_lock(&pit_state->lock);
+
+ if (s->status_latched) {
+ s->status_latched = 0;
+ ret = s->status;
+ } else if (s->count_latched) {
+ switch (s->count_latched) {
+ default:
+ case RW_STATE_LSB:
+ ret = s->latched_count & 0xff;
+ s->count_latched = 0;
+ break;
+ case RW_STATE_MSB:
+ ret = s->latched_count >> 8;
+ s->count_latched = 0;
+ break;
+ case RW_STATE_WORD0:
+ ret = s->latched_count & 0xff;
+ s->count_latched = RW_STATE_MSB;
+ break;
+ }
+ } else {
+ switch (s->read_state) {
+ default:
+ case RW_STATE_LSB:
+ count = pit_get_count(kvm, addr);
+ ret = count & 0xff;
+ break;
+ case RW_STATE_MSB:
+ count = pit_get_count(kvm, addr);
+ ret = (count >> 8) & 0xff;
+ break;
+ case RW_STATE_WORD0:
+ count = pit_get_count(kvm, addr);
+ ret = count & 0xff;
+ s->read_state = RW_STATE_WORD1;
+ break;
+ case RW_STATE_WORD1:
+ count = pit_get_count(kvm, addr);
+ ret = (count >> 8) & 0xff;
+ s->read_state = RW_STATE_WORD0;
+ break;
+ }
+ }
+
+ if (len > sizeof(ret))
+ len = sizeof(ret);
+ memcpy(data, (char *)&ret, len);
+
+ mutex_unlock(&pit_state->lock);
+}
+
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+ return ((addr >= KVM_PIT_BASE_ADDRESS) &&
+ (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
+}
+
+static void speaker_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ u32 val = *(u32 *) data;
+
+ mutex_lock(&pit_state->lock);
+ pit_state->speaker_data_on = (val >> 1) & 1;
+ pit_set_gate(kvm, 2, val & 1);
+ mutex_unlock(&pit_state->lock);
+}
+
+static void speaker_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ unsigned int refresh_clock;
+ int ret;
+
+ /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
+ refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
+
+ mutex_lock(&pit_state->lock);
+ ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
+ (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+ if (len > sizeof(ret))
+ len = sizeof(ret);
+ memcpy(data, (char *)&ret, len);
+ mutex_unlock(&pit_state->lock);
+}
+
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+ return (addr == KVM_SPEAKER_BASE_ADDRESS);
+}
+
+void kvm_pit_reset(struct kvm_pit *pit)
+{
+ int i;
+ struct kvm_kpit_channel_state *c;
+
+ mutex_lock(&pit->pit_state.lock);
+ for (i = 0; i < 3; i++) {
+ c = &pit->pit_state.channels[i];
+ c->mode = 0xff;
+ c->gate = (i != 2);
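+ /* channel 2's gate is driven via the speaker port, so it starts off */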
+ pit_load_count(pit->kvm, i, 0);
+ }
+ mutex_unlock(&pit->pit_state.lock);
+
+ atomic_set(&pit->pit_state.pit_timer.pending, 0);
+ pit->pit_state.inject_pending = 1;
+}
+
+struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+{
+ struct kvm_pit *pit;
+ struct kvm_kpit_state *pit_state;
+
+ pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
+ if (!pit)
+ return NULL;
+
+ mutex_init(&pit->pit_state.lock);
+ mutex_lock(&pit->pit_state.lock);
+
+ /* Initialize PIO device */
+ pit->dev.read = pit_ioport_read;
+ pit->dev.write = pit_ioport_write;
+ pit->dev.in_range = pit_in_range;
+ pit->dev.private = pit;
+ kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+
+ pit->speaker_dev.read = speaker_ioport_read;
+ pit->speaker_dev.write = speaker_ioport_write;
+ pit->speaker_dev.in_range = speaker_in_range;
+ pit->speaker_dev.private = pit;
+ kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
+
+ kvm->arch.vpit = pit;
+ pit->kvm = kvm;
+
+ pit_state = &pit->pit_state;
+ pit_state->pit = pit;
+ hrtimer_init(&pit_state->pit_timer.timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ mutex_unlock(&pit->pit_state.lock);
+
+ kvm_pit_reset(pit);
+
+ return pit;
+}
+
+void kvm_free_pit(struct kvm *kvm)
+{
+ struct hrtimer *timer;
+
+ if (kvm->arch.vpit) {
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
+ hrtimer_cancel(timer);
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ kfree(kvm->arch.vpit);
+ }
+}
+
+void __inject_pit_timer_intr(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
+ kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
+ kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
+ kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
+ mutex_unlock(&kvm->lock);
+}
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_kpit_state *ps;
+
+ if (vcpu && pit) {
+ ps = &pit->pit_state;
+
+ /* Try to inject pending interrupts when:
+ * 1. A pending interrupt exists
+ * 2. The last interrupt was accepted or has waited too long */
+ if (atomic_read(&ps->pit_timer.pending) &&
+ (ps->inject_pending ||
+ (jiffies - ps->last_injected_time
+ >= KVM_MAX_PIT_INTR_INTERVAL))) {
+ ps->inject_pending = 0;
+ __inject_pit_timer_intr(kvm);
+ ps->last_injected_time = jiffies;
+ }
+ }
+}
+
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+{
+ struct kvm_arch *arch = &vcpu->kvm->arch;
+ struct kvm_kpit_state *ps;
+
+ if (vcpu && arch->vpit) {
+ ps = &arch->vpit->pit_state;
+ if (atomic_read(&ps->pit_timer.pending) &&
+ (((arch->vpic->pics[0].imr & 1) == 0 &&
+ arch->vpic->pics[0].irq_base == vec) ||
+ (arch->vioapic->redirtbl[0].fields.vector == vec &&
+ arch->vioapic->redirtbl[0].fields.mask != 1))) {
+ ps->inject_pending = 1;
+ atomic_dec(&ps->pit_timer.pending);
+ ps->channels[0].count_load_time = ktime_get();
+ }
+ }
+}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
new file mode 100644
index 000000000000..db25c2a6c8c4
--- /dev/null
+++ b/arch/x86/kvm/i8254.h
@@ -0,0 +1,63 @@
+#ifndef __I8254_H
+#define __I8254_H
+
+#include "iodev.h"
+
+struct kvm_kpit_timer {
+ struct hrtimer timer;
+ int irq;
+ s64 period; /* unit: ns */
+ s64 scheduled;
+ ktime_t last_update;
+ atomic_t pending;
+};
+
+struct kvm_kpit_channel_state {
+ u32 count; /* can be 65536 */
+ u16 latched_count;
+ u8 count_latched;
+ u8 status_latched;
+ u8 status;
+ u8 read_state;
+ u8 write_state;
+ u8 write_latch;
+ u8 rw_mode;
+ u8 mode;
+ u8 bcd; /* not supported */
+ u8 gate; /* timer start */
+ ktime_t count_load_time;
+};
+
+struct kvm_kpit_state {
+ struct kvm_kpit_channel_state channels[3];
+ struct kvm_kpit_timer pit_timer;
+ u32 speaker_data_on;
+ struct mutex lock;
+ struct kvm_pit *pit;
+ bool inject_pending; /* whether to inject pending interrupts */
+ unsigned long last_injected_time;
+};
+
+struct kvm_pit {
+ unsigned long base_address;
+ struct kvm_io_device dev;
+ struct kvm_io_device speaker_dev;
+ struct kvm *kvm;
+ struct kvm_kpit_state pit_state;
+};
+
+#define KVM_PIT_BASE_ADDRESS 0x40
+#define KVM_SPEAKER_BASE_ADDRESS 0x61
+#define KVM_PIT_MEM_LENGTH 4
+#define KVM_PIT_FREQ 1193181
+#define KVM_MAX_PIT_INTR_INTERVAL (HZ / 100)
+#define KVM_PIT_CHANNEL_MASK 0x3
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
+struct kvm_pit *kvm_create_pit(struct kvm *kvm);
+void kvm_free_pit(struct kvm *kvm);
+void kvm_pit_reset(struct kvm_pit *pit);
+
+#endif
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index e5714759e97f..ce1f583459b1 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -23,6 +23,22 @@
#include <linux/kvm_host.h>
#include "irq.h"
+#include "i8254.h"
+
+/*
+ * check if there are pending timer events
+ * to be processed.
+ */
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = pit_has_pending_timer(vcpu);
+ ret |= apic_has_pending_timer(vcpu);
+
+ return ret;
+}
+EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
/*
* check if there is pending interrupt without
@@ -66,6 +82,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
kvm_inject_apic_timer_irqs(vcpu);
+ kvm_inject_pit_timer_irqs(vcpu);
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
@@ -73,6 +90,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
kvm_apic_timer_intr_post(vcpu, vec);
+ kvm_pit_timer_intr_post(vcpu, vec);
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index fa5ed5d59b5d..1802134b836f 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -85,4 +85,7 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+int pit_has_pending_timer(struct kvm_vcpu *vcpu);
+int apic_has_pending_timer(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index ecdfe97e4635..65ef0fc2c036 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -39,6 +39,8 @@ struct vcpu_svm {
unsigned long host_db_regs[NUM_DB_REGS];
unsigned long host_dr6;
unsigned long host_dr7;
+
+ u32 *msrpm;
};
#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 68a6b1511934..57ac4e4c556a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
} else
apic_clear_vector(vector, apic->regs + APIC_TMR);
- if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
}
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_INIT:
if (level) {
- if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
printk(KERN_DEBUG
"INIT on a runnable vcpu %d\n",
vcpu->vcpu_id);
- vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+ vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
kvm_vcpu_kick(vcpu);
} else {
printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_STARTUP:
printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
- if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+ if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
vcpu->arch.sipi_vector = vector;
- vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+ vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
}
@@ -658,7 +658,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
"timer initial count 0x%x, period %lldns, "
- "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
+ "expire @ 0x%016" PRIx64 ".\n", __func__,
APIC_BUS_CYCLE_NS, ktime_to_ns(now),
apic_get_reg(apic, APIC_TMICT),
apic->timer.period,
@@ -691,7 +691,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
/* too common printing */
if (offset != APIC_EOI)
apic_debug("%s: offset 0x%x with length 0x%x, and value is "
- "0x%x\n", __FUNCTION__, offset, len, val);
+ "0x%x\n", __func__, offset, len, val);
offset &= 0xff0;
@@ -822,6 +822,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (apic_get_reg(apic, APIC_TASKPRI) & 4));
}
+EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
@@ -869,7 +870,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic;
int i;
- apic_debug("%s\n", __FUNCTION__);
+ apic_debug("%s\n", __func__);
ASSERT(vcpu);
apic = vcpu->arch.apic;
@@ -907,7 +908,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_update_ppr(apic);
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
- "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
+ "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
@@ -940,7 +941,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
atomic_inc(&apic->timer.pending);
if (waitqueue_active(q)) {
- apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
wake_up_interruptible(q);
}
if (apic_lvtt_period(apic)) {
@@ -952,6 +953,16 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
return result;
}
+int apic_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *lapic = vcpu->arch.apic;
+
+ if (lapic)
+ return atomic_read(&lapic->timer.pending);
+
+ return 0;
+}
+
static int __inject_apic_timer_irq(struct kvm_lapic *apic)
{
int vector;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e55af12e11b7..2ad6f5481671 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -27,11 +27,22 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/hugetlb.h>
+#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
+/*
+ * Setting this variable to true enables Two-Dimensional-Paging, where
+ * the hardware walks 2 page tables:
+ * 1. the guest-virtual to guest-physical
+ * 2. while doing 1., the guest-physical to host-physical
+ * If the hardware supports that, we don't need to do shadow paging.
+ */
+bool tdp_enabled = false;
+
#undef MMU_DEBUG
#undef AUDIT
@@ -101,8 +112,6 @@ static int dbg = 1;
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
-#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define PT64_LEVEL_BITS 9
@@ -159,6 +168,13 @@ static int dbg = 1;
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+struct kvm_pv_mmu_op_buffer {
+ void *ptr;
+ unsigned len;
+ unsigned processed;
+ char buf[512] __aligned(sizeof(long));
+};
+
struct kvm_rmap_desc {
u64 *shadow_ptes[RMAP_EXT];
struct kvm_rmap_desc *more;
@@ -200,11 +216,15 @@ static int is_present_pte(unsigned long pte)
static int is_shadow_present_pte(u64 pte)
{
- pte &= ~PT_SHADOW_IO_MARK;
return pte != shadow_trap_nonpresent_pte
&& pte != shadow_notrap_nonpresent_pte;
}
+static int is_large_pte(u64 pte)
+{
+ return pte & PT_PAGE_SIZE_MASK;
+}
+
static int is_writeble_pte(unsigned long pte)
{
return pte & PT_WRITABLE_MASK;
@@ -215,14 +235,14 @@ static int is_dirty_pte(unsigned long pte)
return pte & PT_DIRTY_MASK;
}
-static int is_io_pte(unsigned long pte)
+static int is_rmap_pte(u64 pte)
{
- return pte & PT_SHADOW_IO_MARK;
+ return is_shadow_present_pte(pte);
}
-static int is_rmap_pte(u64 pte)
+static pfn_t spte_to_pfn(u64 pte)
{
- return is_shadow_present_pte(pte);
+ return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t pse36_gfn_delta(u32 gpte)
@@ -349,16 +369,100 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
}
/*
+ * Return the pointer to the largepage write count for a given
+ * gfn, handling slots that are not large page aligned.
+ */
+static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
+{
+ unsigned long idx;
+
+ idx = (gfn / KVM_PAGES_PER_HPAGE) -
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+ return &slot->lpage_info[idx].write_count;
+}
+
+static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+ int *write_count;
+
+ write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+ *write_count += 1;
+ WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
+}
+
+static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+ int *write_count;
+
+ write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+ *write_count -= 1;
+ WARN_ON(*write_count < 0);
+}
+
+static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+ int *largepage_idx;
+
+ if (slot) {
+ largepage_idx = slot_largepage_idx(gfn, slot);
+ return *largepage_idx;
+ }
+
+ return 1;
+}
+
+static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
+{
+ struct vm_area_struct *vma;
+ unsigned long addr;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
+ return 0;
+
+ vma = find_vma(current->mm, addr);
+ if (vma && is_vm_hugetlb_page(vma))
+ return 1;
+
+ return 0;
+}
+
+static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ if (has_wrprotected_page(vcpu->kvm, large_gfn))
+ return 0;
+
+ if (!host_largepage_backed(vcpu->kvm, large_gfn))
+ return 0;
+
+ slot = gfn_to_memslot(vcpu->kvm, large_gfn);
+ if (slot && slot->dirty_bitmap)
+ return 0;
+
+ return 1;
+}
+
+/*
* Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called
*/
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
struct kvm_memory_slot *slot;
+ unsigned long idx;
slot = gfn_to_memslot(kvm, gfn);
- return &slot->rmap[gfn - slot->base_gfn];
+ if (!lpage)
+ return &slot->rmap[gfn - slot->base_gfn];
+
+ idx = (gfn / KVM_PAGES_PER_HPAGE) -
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+
+ return &slot->lpage_info[idx].rmap_pde;
}
/*
@@ -370,7 +474,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
* If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
* containing more mappings.
*/
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
struct kvm_mmu_page *sp;
struct kvm_rmap_desc *desc;
@@ -382,7 +486,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
gfn = unalias_gfn(vcpu->kvm, gfn);
sp = page_header(__pa(spte));
sp->gfns[spte - sp->spt] = gfn;
- rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
if (!*rmapp) {
rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
*rmapp = (unsigned long)spte;
@@ -435,20 +539,21 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
struct kvm_rmap_desc *desc;
struct kvm_rmap_desc *prev_desc;
struct kvm_mmu_page *sp;
- struct page *page;
+ pfn_t pfn;
unsigned long *rmapp;
int i;
if (!is_rmap_pte(*spte))
return;
sp = page_header(__pa(spte));
- page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
- mark_page_accessed(page);
+ pfn = spte_to_pfn(*spte);
+ if (*spte & PT_ACCESSED_MASK)
+ kvm_set_pfn_accessed(pfn);
if (is_writeble_pte(*spte))
- kvm_release_page_dirty(page);
+ kvm_release_pfn_dirty(pfn);
else
- kvm_release_page_clean(page);
- rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
+ kvm_release_pfn_clean(pfn);
+ rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
if (!*rmapp) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
BUG();
@@ -514,7 +619,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
int write_protected = 0;
gfn = unalias_gfn(kvm, gfn);
- rmapp = gfn_to_rmap(kvm, gfn);
+ rmapp = gfn_to_rmap(kvm, gfn, 0);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
@@ -527,8 +632,35 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
}
spte = rmap_next(kvm, rmapp, spte);
}
+ if (write_protected) {
+ pfn_t pfn;
+
+ spte = rmap_next(kvm, rmapp, NULL);
+ pfn = spte_to_pfn(*spte);
+ kvm_set_pfn_dirty(pfn);
+ }
+
+ /* check for huge page mappings */
+ rmapp = gfn_to_rmap(kvm, gfn, 1);
+ spte = rmap_next(kvm, rmapp, NULL);
+ while (spte) {
+ BUG_ON(!spte);
+ BUG_ON(!(*spte & PT_PRESENT_MASK));
+ BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+ pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
+ if (is_writeble_pte(*spte)) {
+ rmap_remove(kvm, spte);
+ --kvm->stat.lpages;
+ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ write_protected = 1;
+ }
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+
if (write_protected)
kvm_flush_remote_tlbs(kvm);
+
+ account_shadowed(kvm, gfn);
}
#ifdef MMU_DEBUG
@@ -538,8 +670,8 @@ static int is_empty_shadow_page(u64 *spt)
u64 *end;
for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
- if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
- printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+ if (*pos != shadow_trap_nonpresent_pte) {
+ printk(KERN_ERR "%s: %p %llx\n", __func__,
pos, *pos);
return 0;
}
@@ -559,7 +691,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
- return gfn;
+ return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -662,13 +794,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
struct hlist_node *node;
- pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
+ index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
hlist_for_each_entry(sp, node, bucket, hash_link)
- if (sp->gfn == gfn && !sp->role.metaphysical) {
+ if (sp->gfn == gfn && !sp->role.metaphysical
+ && !sp->role.invalid) {
pgprintk("%s: found role %x\n",
- __FUNCTION__, sp->role.word);
+ __func__, sp->role.word);
return sp;
}
return NULL;
@@ -699,27 +832,27 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+ pgprintk("%s: looking gfn %lx role %x\n", __func__,
gfn, role.word);
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry(sp, node, bucket, hash_link)
if (sp->gfn == gfn && sp->role.word == role.word) {
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
- pgprintk("%s: found\n", __FUNCTION__);
+ pgprintk("%s: found\n", __func__);
return sp;
}
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte);
if (!sp)
return sp;
- pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+ pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
sp->gfn = gfn;
sp->role = role;
hlist_add_head(&sp->hash_link, bucket);
- vcpu->arch.mmu.prefetch_page(vcpu, sp);
if (!metaphysical)
rmap_write_protect(vcpu->kvm, gfn);
+ vcpu->arch.mmu.prefetch_page(vcpu, sp);
return sp;
}
@@ -745,11 +878,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
ent = pt[i];
+ if (is_shadow_present_pte(ent)) {
+ if (!is_large_pte(ent)) {
+ ent &= PT64_BASE_ADDR_MASK;
+ mmu_page_remove_parent_pte(page_header(ent),
+ &pt[i]);
+ } else {
+ --kvm->stat.lpages;
+ rmap_remove(kvm, &pt[i]);
+ }
+ }
pt[i] = shadow_trap_nonpresent_pte;
- if (!is_shadow_present_pte(ent))
- continue;
- ent &= PT64_BASE_ADDR_MASK;
- mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
}
kvm_flush_remote_tlbs(kvm);
}
@@ -789,10 +928,15 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
}
kvm_mmu_page_unlink_children(kvm, sp);
if (!sp->root_count) {
+ if (!sp->role.metaphysical)
+ unaccount_shadowed(kvm, sp->gfn);
hlist_del(&sp->hash_link);
kvm_mmu_free_page(kvm, sp);
- } else
+ } else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
+ sp->role.invalid = 1;
+ kvm_reload_remote_mmus(kvm);
+ }
kvm_mmu_reset_last_pte_updated(kvm);
}
@@ -838,13 +982,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
struct hlist_node *node, *n;
int r;
- pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+ pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
if (sp->gfn == gfn && !sp->role.metaphysical) {
- pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+ pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
kvm_mmu_zap_page(kvm, sp);
r = 1;
@@ -857,7 +1001,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
- pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+ pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
kvm_mmu_zap_page(kvm, sp);
}
}
@@ -889,26 +1033,39 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
unsigned pt_access, unsigned pte_access,
int user_fault, int write_fault, int dirty,
- int *ptwrite, gfn_t gfn, struct page *page)
+ int *ptwrite, int largepage, gfn_t gfn,
+ pfn_t pfn, bool speculative)
{
u64 spte;
int was_rmapped = 0;
int was_writeble = is_writeble_pte(*shadow_pte);
- hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
pgprintk("%s: spte %llx access %x write_fault %d"
" user_fault %d gfn %lx\n",
- __FUNCTION__, *shadow_pte, pt_access,
+ __func__, *shadow_pte, pt_access,
write_fault, user_fault, gfn);
if (is_rmap_pte(*shadow_pte)) {
- if (host_pfn != page_to_pfn(page)) {
+ /*
+ * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+ * the parent of the now unreachable PTE.
+ */
+ if (largepage && !is_large_pte(*shadow_pte)) {
+ struct kvm_mmu_page *child;
+ u64 pte = *shadow_pte;
+
+ child = page_header(pte & PT64_BASE_ADDR_MASK);
+ mmu_page_remove_parent_pte(child, shadow_pte);
+ } else if (pfn != spte_to_pfn(*shadow_pte)) {
pgprintk("hfn old %lx new %lx\n",
- host_pfn, page_to_pfn(page));
+ spte_to_pfn(*shadow_pte), pfn);
rmap_remove(vcpu->kvm, shadow_pte);
+ } else {
+ if (largepage)
+ was_rmapped = is_large_pte(*shadow_pte);
+ else
+ was_rmapped = 1;
}
- else
- was_rmapped = 1;
}
/*
@@ -917,6 +1074,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
* demand paging).
*/
spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+ if (!speculative)
+ pte_access |= PT_ACCESSED_MASK;
if (!dirty)
pte_access &= ~ACC_WRITE_MASK;
if (!(pte_access & ACC_EXEC_MASK))
@@ -925,15 +1084,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
spte |= PT_PRESENT_MASK;
if (pte_access & ACC_USER_MASK)
spte |= PT_USER_MASK;
+ if (largepage)
+ spte |= PT_PAGE_SIZE_MASK;
- if (is_error_page(page)) {
- set_shadow_pte(shadow_pte,
- shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
- kvm_release_page_clean(page);
- return;
- }
-
- spte |= page_to_phys(page);
+ spte |= (u64)pfn << PAGE_SHIFT;
if ((pte_access & ACC_WRITE_MASK)
|| (write_fault && !is_write_protection(vcpu) && !user_fault)) {
@@ -946,9 +1100,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
}
shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
- if (shadow) {
+ if (shadow ||
+ (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
+ __func__, gfn);
pte_access &= ~ACC_WRITE_MASK;
if (is_writeble_pte(spte)) {
spte &= ~PT_WRITABLE_MASK;
@@ -964,18 +1119,25 @@ unshadowed:
if (pte_access & ACC_WRITE_MASK)
mark_page_dirty(vcpu->kvm, gfn);
- pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+ pgprintk("%s: setting spte %llx\n", __func__, spte);
+ pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+ (spte & PT_PAGE_SIZE_MASK) ? "2MB" : "4kB",
+ (spte & PT_WRITABLE_MASK) ? "RW" : "R", gfn, spte, shadow_pte);
set_shadow_pte(shadow_pte, spte);
+ if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
+ && (spte & PT_PRESENT_MASK))
+ ++vcpu->kvm->stat.lpages;
+
page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
if (!was_rmapped) {
- rmap_add(vcpu, shadow_pte, gfn);
+ rmap_add(vcpu, shadow_pte, gfn, largepage);
if (!is_rmap_pte(*shadow_pte))
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
} else {
if (was_writeble)
- kvm_release_page_dirty(page);
+ kvm_release_pfn_dirty(pfn);
else
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
}
if (!ptwrite || !*ptwrite)
vcpu->arch.last_pte_updated = shadow_pte;
@@ -985,10 +1147,10 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
-static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
- gfn_t gfn, struct page *page)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+ int largepage, gfn_t gfn, pfn_t pfn,
+ int level)
{
- int level = PT32E_ROOT_LEVEL;
hpa_t table_addr = vcpu->arch.mmu.root_hpa;
int pt_write = 0;
@@ -1001,8 +1163,14 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
if (level == 1) {
mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
- 0, write, 1, &pt_write, gfn, page);
- return pt_write || is_io_pte(table[index]);
+ 0, write, 1, &pt_write, 0, gfn, pfn, false);
+ return pt_write;
+ }
+
+ if (largepage && level == 2) {
+ mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+ 0, write, 1, &pt_write, 1, gfn, pfn, false);
+ return pt_write;
}
if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1016,7 +1184,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
1, ACC_ALL, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
return -ENOMEM;
}
@@ -1030,21 +1198,30 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
int r;
-
- struct page *page;
-
- down_read(&vcpu->kvm->slots_lock);
+ int largepage = 0;
+ pfn_t pfn;
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ largepage = 1;
+ }
+
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
+ /* mmio */
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
- r = __nonpaging_map(vcpu, v, write, gfn, page);
+ r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
+ PT32E_ROOT_LEVEL);
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
return r;
}
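
Aside: the huge-page rounding above (and again in tdp_page_fault() further down) is plain mask arithmetic. A minimal user-space sketch, assuming KVM_PAGES_PER_HPAGE is 512 (2 MiB huge pages over 4 KiB base pages):

#include <stdio.h>
#include <stdint.h>

#define KVM_PAGES_PER_HPAGE 512ULL	/* assumed: 2 MiB / 4 KiB */

int main(void)
{
	uint64_t gfn = 0x12345;
	uint64_t large_gfn = gfn & ~(KVM_PAGES_PER_HPAGE - 1);

	/* 0x12345 rounds down to 0x12200, the base of its 2 MiB frame */
	printf("gfn %#llx -> large gfn %#llx\n",
	       (unsigned long long)gfn, (unsigned long long)large_gfn);
	return 0;
}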
@@ -1073,6 +1250,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
+ if (!sp->root_count && sp->role.invalid)
+ kvm_mmu_zap_page(vcpu->kvm, sp);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
spin_unlock(&vcpu->kvm->mmu_lock);
return;
@@ -1085,6 +1264,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
root &= PT64_BASE_ADDR_MASK;
sp = page_header(root);
--sp->root_count;
+ if (!sp->root_count && sp->role.invalid)
+ kvm_mmu_zap_page(vcpu->kvm, sp);
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
@@ -1097,6 +1278,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
int i;
gfn_t root_gfn;
struct kvm_mmu_page *sp;
+ int metaphysical = 0;
root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
@@ -1105,14 +1287,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->arch.mmu.root_hpa;
ASSERT(!VALID_PAGE(root));
+ if (tdp_enabled)
+ metaphysical = 1;
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+ PT64_ROOT_LEVEL, metaphysical,
+ ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->arch.mmu.root_hpa = root;
return;
}
#endif
+ metaphysical = !is_paging(vcpu);
+ if (tdp_enabled)
+ metaphysical = 1;
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
@@ -1126,7 +1314,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
} else if (vcpu->arch.mmu.root_level == 0)
root_gfn = 0;
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
- PT32_ROOT_LEVEL, !is_paging(vcpu),
+ PT32_ROOT_LEVEL, metaphysical,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
@@ -1146,7 +1334,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
gfn_t gfn;
int r;
- pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
+ pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -1160,6 +1348,41 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
error_code & PFERR_WRITE_MASK, gfn);
}
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
+ u32 error_code)
+{
+ pfn_t pfn;
+ int r;
+ int largepage = 0;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+
+ ASSERT(vcpu);
+ ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
+
+ down_read(&current->mm->mmap_sem);
+ if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ largepage = 1;
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
+ up_read(&current->mm->mmap_sem);
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_free_some_pages(vcpu);
+ r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
+ largepage, gfn, pfn, TDP_ROOT_LEVEL);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ return r;
+}
+
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
mmu_free_roots(vcpu);
@@ -1188,7 +1411,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
- pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
+ pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
mmu_free_roots(vcpu);
}
@@ -1253,7 +1476,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ context->new_cr3 = nonpaging_new_cr3;
+ context->page_fault = tdp_page_fault;
+ context->free = nonpaging_free;
+ context->prefetch_page = nonpaging_prefetch_page;
+ context->shadow_root_level = TDP_ROOT_LEVEL;
+ context->root_hpa = INVALID_PAGE;
+
+ if (!is_paging(vcpu)) {
+ context->gva_to_gpa = nonpaging_gva_to_gpa;
+ context->root_level = 0;
+ } else if (is_long_mode(vcpu)) {
+ context->gva_to_gpa = paging64_gva_to_gpa;
+ context->root_level = PT64_ROOT_LEVEL;
+ } else if (is_pae(vcpu)) {
+ context->gva_to_gpa = paging64_gva_to_gpa;
+ context->root_level = PT32E_ROOT_LEVEL;
+ } else {
+ context->gva_to_gpa = paging32_gva_to_gpa;
+ context->root_level = PT32_ROOT_LEVEL;
+ }
+
+ return 0;
+}
+
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1268,6 +1519,16 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
return paging32_init_context(vcpu);
}
+static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.update_pte.pfn = bad_pfn;
+
+ if (tdp_enabled)
+ return init_kvm_tdp_mmu(vcpu);
+ else
+ return init_kvm_softmmu(vcpu);
+}
+
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
@@ -1316,7 +1577,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
pte = *spte;
if (is_shadow_present_pte(pte)) {
- if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+ if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
+ is_large_pte(pte))
rmap_remove(vcpu->kvm, spte);
else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1324,24 +1586,26 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
}
}
set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ if (is_large_pte(pte))
+ --vcpu->kvm->stat.lpages;
}
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
- if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+ if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
+ && !vcpu->arch.update_pte.largepage) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
}
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1378,7 +1642,9 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn_t gfn;
int r;
u64 gpte = 0;
- struct page *page;
+ pfn_t pfn;
+
+ vcpu->arch.update_pte.largepage = 0;
if (bytes != 4 && bytes != 8)
return;
@@ -1408,11 +1674,19 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ vcpu->arch.update_pte.largepage = 1;
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return;
+ }
vcpu->arch.update_pte.gfn = gfn;
- vcpu->arch.update_pte.page = page;
+ vcpu->arch.update_pte.pfn = pfn;
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1423,7 +1697,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
+ u64 entry, gentry;
u64 *spte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
@@ -1433,8 +1707,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
- pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
@@ -1450,7 +1725,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu->arch.last_pt_write_count = 1;
vcpu->arch.last_pte_updated = NULL;
}
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
if (sp->gfn != gfn || sp->role.metaphysical)
@@ -1496,20 +1771,29 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gpa & ~(u64)(pte_size - 1),
+ &gentry, pte_size);
+ new = (const void *)&gentry;
+ if (r < 0)
+ new = NULL;
+ }
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
- page_offset & (pte_size - 1));
+ if (new)
+ mmu_pte_write_new_pte(vcpu, sp, spte, new);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
}
kvm_mmu_audit(vcpu, "post pte write");
spin_unlock(&vcpu->kvm->mmu_lock);
- if (vcpu->arch.update_pte.page) {
- kvm_release_page_clean(vcpu->arch.update_pte.page);
- vcpu->arch.update_pte.page = NULL;
+ if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
+ kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
+ vcpu->arch.update_pte.pfn = bad_pfn;
}
}
@@ -1518,9 +1802,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t gpa;
int r;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
- up_read(&vcpu->kvm->slots_lock);
spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -1577,6 +1859,12 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+void kvm_enable_tdp(void)
+{
+ tdp_enabled = true;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_tdp);
+
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
@@ -1677,7 +1965,53 @@ void kvm_mmu_zap_all(struct kvm *kvm)
kvm_flush_remote_tlbs(kvm);
}
-void kvm_mmu_module_exit(void)
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+ struct kvm_mmu_page *page;
+
+ page = container_of(kvm->arch.active_mmu_pages.prev,
+ struct kvm_mmu_page, link);
+ kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ struct kvm *kvm;
+ struct kvm *kvm_freed = NULL;
+ int cache_count = 0;
+
+ spin_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ int npages;
+
+ spin_lock(&kvm->mmu_lock);
+ npages = kvm->arch.n_alloc_mmu_pages -
+ kvm->arch.n_free_mmu_pages;
+ cache_count += npages;
+ if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+ kvm_mmu_remove_one_alloc_mmu_page(kvm);
+ cache_count--;
+ kvm_freed = kvm;
+ }
+ nr_to_scan--;
+
+ spin_unlock(&kvm->mmu_lock);
+ }
+ if (kvm_freed)
+ list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+ spin_unlock(&kvm_lock);
+
+ return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+ .shrink = mmu_shrink,
+ .seeks = DEFAULT_SEEKS * 10,
+};
+
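
Aside: mmu_shrink() above implements the generic shrinker contract of this era: the callback is offered nr_to_scan objects to drop and returns the count of reclaimable objects left. A minimal sketch of wiring up any such shrinker (names illustrative, not from this patch):

#include <linux/module.h>
#include <linux/mm.h>

static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	/* drop up to nr_to_scan objects, then report how many remain */
	return 0;
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks = DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	register_shrinker(&example_shrinker);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_shrinker(&example_shrinker);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note the patch sets .seeks to DEFAULT_SEEKS * 10, deliberately marking shadow pages as expensive to recreate so they are reclaimed last.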
+void mmu_destroy_caches(void)
{
if (pte_chain_cache)
kmem_cache_destroy(pte_chain_cache);
@@ -1687,6 +2021,12 @@ void kvm_mmu_module_exit(void)
kmem_cache_destroy(mmu_page_header_cache);
}
+void kvm_mmu_module_exit(void)
+{
+ mmu_destroy_caches();
+ unregister_shrinker(&mmu_shrinker);
+}
+
int kvm_mmu_module_init(void)
{
pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -1706,10 +2046,12 @@ int kvm_mmu_module_init(void)
if (!mmu_page_header_cache)
goto nomem;
+ register_shrinker(&mmu_shrinker);
+
return 0;
nomem:
- kvm_mmu_module_exit();
+ mmu_destroy_caches();
return -ENOMEM;
}
@@ -1732,6 +2074,127 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
return nr_mmu_pages;
}
+static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+ unsigned len)
+{
+ if (len > buffer->len)
+ return NULL;
+ return buffer->ptr;
+}
+
+static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+ unsigned len)
+{
+ void *ret;
+
+ ret = pv_mmu_peek_buffer(buffer, len);
+ if (!ret)
+ return ret;
+ buffer->ptr += len;
+ buffer->len -= len;
+ buffer->processed += len;
+ return ret;
+}
+
+static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
+ gpa_t addr, gpa_t value)
+{
+ int bytes = 8;
+ int r;
+
+ if (!is_long_mode(vcpu) && !is_pae(vcpu))
+ bytes = 4;
+
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
+
+ if (!emulator_write_phys(vcpu, addr, &value, bytes))
+ return -EFAULT;
+
+ return 1;
+}
+
+static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->tlb_flush(vcpu);
+ return 1;
+}
+
+static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+ spin_lock(&vcpu->kvm->mmu_lock);
+ mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ return 1;
+}
+
+static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
+ struct kvm_pv_mmu_op_buffer *buffer)
+{
+ struct kvm_mmu_op_header *header;
+
+ header = pv_mmu_peek_buffer(buffer, sizeof *header);
+ if (!header)
+ return 0;
+ switch (header->op) {
+ case KVM_MMU_OP_WRITE_PTE: {
+ struct kvm_mmu_op_write_pte *wpte;
+
+ wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
+ if (!wpte)
+ return 0;
+ return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
+ wpte->pte_val);
+ }
+ case KVM_MMU_OP_FLUSH_TLB: {
+ struct kvm_mmu_op_flush_tlb *ftlb;
+
+ ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
+ if (!ftlb)
+ return 0;
+ return kvm_pv_mmu_flush_tlb(vcpu);
+ }
+ case KVM_MMU_OP_RELEASE_PT: {
+ struct kvm_mmu_op_release_pt *rpt;
+
+ rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
+ if (!rpt)
+ return 0;
+ return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
+ }
+ default: return 0;
+ }
+}
+
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+ gpa_t addr, unsigned long *ret)
+{
+ int r;
+ struct kvm_pv_mmu_op_buffer buffer;
+
+ buffer.ptr = buffer.buf;
+ buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
+ buffer.processed = 0;
+
+ r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+ if (r)
+ goto out;
+
+ while (buffer.len) {
+ r = kvm_pv_mmu_op_one(vcpu, &buffer);
+ if (r < 0)
+ goto out;
+ if (r == 0)
+ break;
+ }
+
+ r = 1;
+out:
+ *ret = buffer.processed;
+ return r;
+}
+
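
Aside: the buffer walked by kvm_pv_mmu_op_one() is a byte stream of variable-size records, each led by a kvm_mmu_op_header. A hypothetical guest-side sketch of encoding one WRITE_PTE record (the struct layouts and opcode value here are assumptions; the authoritative definitions live in the kvm_para headers):

#include <string.h>
#include <stdint.h>

struct kvm_mmu_op_header {
	uint32_t op;
	uint32_t pad;
};

struct kvm_mmu_op_write_pte {
	struct kvm_mmu_op_header header;
	uint64_t pte_phys;
	uint64_t pte_val;
};

#define KVM_MMU_OP_WRITE_PTE 1		/* assumed opcode value */

static size_t encode_write_pte(void *buf, uint64_t pte_phys, uint64_t pte_val)
{
	struct kvm_mmu_op_write_pte wpte = {
		.header.op = KVM_MMU_OP_WRITE_PTE,
		.pte_phys = pte_phys,
		.pte_val = pte_val,
	};

	memcpy(buf, &wpte, sizeof(wpte));
	return sizeof(wpte);	/* caller appends further records here */
}

The host consumes records until the buffer is exhausted or a handler returns 0, and reports the bytes processed back through *ret.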
#ifdef AUDIT
static const char *audit_msg;
@@ -1768,8 +2231,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
audit_mappings_page(vcpu, ent, va, level - 1);
} else {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
- struct page *page = gpa_to_page(vcpu, gpa);
- hpa_t hpa = page_to_phys(page);
+ hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
if (is_shadow_present_pte(ent)
&& (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1782,7 +2244,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
&& !is_error_hpa(hpa))
printk(KERN_ERR "audit: (%s) notrap shadow,"
" valid guest gva %lx\n", audit_msg, va);
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
}
}
@@ -1867,7 +2329,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
if (n_rmap != n_actual)
printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
- __FUNCTION__, audit_msg, n_rmap, n_actual);
+ __func__, audit_msg, n_rmap, n_actual);
}
static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -1887,7 +2349,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
if (*rmapp)
printk(KERN_ERR "%s: (%s) shadow page has writable"
" mappings: gfn %lx role %x\n",
- __FUNCTION__, audit_msg, sp->gfn,
+ __func__, audit_msg, sp->gfn,
sp->role.word);
}
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fce19ec7a23..e64e9f56a65e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -3,6 +3,12 @@
#include <linux/kvm_host.h>
+#ifdef CONFIG_X86_64
+#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
+#else
+#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
+#endif
+
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ecc0856268c4..156fe10288ae 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -130,7 +130,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
unsigned index, pt_access, pte_access;
gpa_t pte_gpa;
- pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
+ pgprintk("%s: addr %lx\n", __func__, addr);
walk:
walker->level = vcpu->arch.mmu.root_level;
pte = vcpu->arch.cr3;
@@ -155,7 +155,7 @@ walk:
pte_gpa += index * sizeof(pt_element_t);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
- pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+ pgprintk("%s: table_gfn[%d] %lx\n", __func__,
walker->level - 1, table_gfn);
kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
@@ -222,7 +222,7 @@ walk:
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __FUNCTION__, (u64)pte, pt_access, pte_access);
+ __func__, (u64)pte, pt_access, pte_access);
return 1;
not_present:
@@ -243,31 +243,30 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
- struct page *npage;
+ pfn_t pfn;
+ int largepage = vcpu->arch.update_pte.largepage;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
- pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+ pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
return;
- npage = vcpu->arch.update_pte.page;
- if (!npage)
+ pfn = vcpu->arch.update_pte.pfn;
+ if (is_error_pfn(pfn))
return;
- get_page(npage);
+ kvm_get_pfn(pfn);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
- gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+ gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+ pfn, true);
}
/*
@@ -275,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *walker,
- int user_fault, int write_fault, int *ptwrite,
- struct page *page)
+ int user_fault, int write_fault, int largepage,
+ int *ptwrite, pfn_t pfn)
{
hpa_t shadow_addr;
int level;
@@ -304,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_ent = ((u64 *)__va(shadow_addr)) + index;
if (level == PT_PAGE_TABLE_LEVEL)
break;
- if (is_shadow_present_pte(*shadow_ent)) {
+
+ if (largepage && level == PT_DIRECTORY_LEVEL)
+ break;
+
+ if (is_shadow_present_pte(*shadow_ent)
+ && !is_large_pte(*shadow_ent)) {
shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
continue;
}
+ if (is_large_pte(*shadow_ent))
+ rmap_remove(vcpu->kvm, shadow_ent);
+
if (level - 1 == PT_PAGE_TABLE_LEVEL
&& walker->level == PT_DIRECTORY_LEVEL) {
metaphysical = 1;
@@ -329,7 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
walker->pte_gpa[level - 2],
&curr_pte, sizeof(curr_pte));
if (r || curr_pte != walker->ptes[level - 2]) {
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
return NULL;
}
}
@@ -342,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
user_fault, write_fault,
walker->ptes[walker->level-1] & PT_DIRTY_MASK,
- ptwrite, walker->gfn, page);
+ ptwrite, largepage, walker->gfn, pfn, false);
return shadow_ent;
}
@@ -371,16 +378,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
u64 *shadow_pte;
int write_pt = 0;
int r;
- struct page *page;
+ pfn_t pfn;
+ int largepage = 0;
- pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+ pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
kvm_mmu_audit(vcpu, "pre page fault");
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
- down_read(&vcpu->kvm->slots_lock);
/*
* Look up the shadow pte for the faulting address.
*/
@@ -391,40 +398,45 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
* The page is not mapped by the guest. Let the guest handle it.
*/
if (!r) {
- pgprintk("%s: guest page fault\n", __FUNCTION__);
+ pgprintk("%s: guest page fault\n", __func__);
inject_page_fault(vcpu, addr, walker.error_code);
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
- up_read(&vcpu->kvm->slots_lock);
return 0;
}
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, walker.gfn);
+ if (walker.level == PT_DIRECTORY_LEVEL) {
+ gfn_t large_gfn;
+ large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+ if (is_largepage_backed(vcpu, large_gfn)) {
+ walker.gfn = large_gfn;
+ largepage = 1;
+ }
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
up_read(&current->mm->mmap_sem);
+ /* mmio */
+ if (is_error_pfn(pfn)) {
+ pgprintk("gfn %x is mmio\n", walker.gfn);
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- &write_pt, page);
- pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+ largepage, &write_pt, pfn);
+
+ pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
shadow_pte, *shadow_pte, write_pt);
if (!write_pt)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
- /*
- * mmio: emulate if accessible, otherwise its a guest fault.
- */
- if (shadow_pte && is_io_pte(*shadow_pte)) {
- spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
- return 1;
- }
-
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
return write_pt;
}
diff --git a/arch/x86/kvm/segment_descriptor.h b/arch/x86/kvm/segment_descriptor.h
deleted file mode 100644
index 56fc4c873389..000000000000
--- a/arch/x86/kvm/segment_descriptor.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __SEGMENT_DESCRIPTOR_H
-#define __SEGMENT_DESCRIPTOR_H
-
-struct segment_descriptor {
- u16 limit_low;
- u16 base_low;
- u8 base_mid;
- u8 type : 4;
- u8 system : 1;
- u8 dpl : 2;
- u8 present : 1;
- u8 limit_high : 4;
- u8 avl : 1;
- u8 long_mode : 1;
- u8 default_op : 1;
- u8 granularity : 1;
- u8 base_high;
-} __attribute__((packed));
-
-#ifdef CONFIG_X86_64
-/* LDT or TSS descriptor in the GDT. 16 bytes. */
-struct segment_descriptor_64 {
- struct segment_descriptor s;
- u32 base_higher;
- u32 pad_zero;
-};
-
-#endif
-#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1a582f1090e8..89e0be2c10d0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,18 @@ MODULE_LICENSE("GPL");
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_DEATURE_SVML (1 << 2)
+#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+
+/* enable NPT for AMD64 and X86 with PAE */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static bool npt_enabled = true;
+#else
+static bool npt_enabled = false;
+#endif
+static int npt = 1;
+
+module_param(npt, int, S_IRUGO);
+
static void kvm_reput_irq(struct vcpu_svm *svm);
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
@@ -54,8 +66,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_svm, vcpu);
}
-unsigned long iopm_base;
-unsigned long msrpm_base;
+static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
@@ -182,7 +193,7 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (!(efer & EFER_LMA))
+ if (!npt_enabled && !(efer & EFER_LMA))
efer &= ~EFER_LME;
to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
@@ -219,12 +230,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
if (!svm->next_rip) {
- printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
+ printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
}
if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
- __FUNCTION__,
+ __func__,
svm->vmcb->save.rip,
svm->next_rip);
@@ -279,11 +290,7 @@ static void svm_hardware_enable(void *garbage)
struct svm_cpu_data *svm_data;
uint64_t efer;
-#ifdef CONFIG_X86_64
- struct desc_ptr gdt_descr;
-#else
struct desc_ptr gdt_descr;
-#endif
struct desc_struct *gdt;
int me = raw_smp_processor_id();
@@ -302,7 +309,6 @@ static void svm_hardware_enable(void *garbage)
svm_data->asid_generation = 1;
svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
svm_data->next_asid = svm_data->max_asid + 1;
- svm_features = cpuid_edx(SVM_CPUID_FUNC);
asm volatile ("sgdt %0" : "=m"(gdt_descr));
gdt = (struct desc_struct *)gdt_descr.address;
@@ -361,12 +367,51 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
BUG();
}
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
+ memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+
+#ifdef CONFIG_X86_64
+ set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
+ set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
+ set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
+#endif
+ set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+}
+
+static void svm_enable_lbrv(struct vcpu_svm *svm)
+{
+ u32 *msrpm = svm->msrpm;
+
+ svm->vmcb->control.lbr_ctl = 1;
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+}
+
+static void svm_disable_lbrv(struct vcpu_svm *svm)
+{
+ u32 *msrpm = svm->msrpm;
+
+ svm->vmcb->control.lbr_ctl = 0;
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+}
+
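
Aside: these two helpers are driven by the MSR_IA32_DEBUGCTLMSR write handler further down, which rejects reserved bits and keys off bit 0 (LBR enable). The validation in isolation, as a user-space sketch:

#include <stdbool.h>
#include <stdint.h>

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
#define DEBUGCTL_LBR_BIT       (1ULL << 0)

/* returns false when the guest write should fault instead */
static bool debugctl_write_ok(uint64_t data, bool *enable_lbr)
{
	if (data & DEBUGCTL_RESERVED_BITS)
		return false;
	*enable_lbr = !!(data & DEBUGCTL_LBR_BIT);
	return true;
}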
static __init int svm_hardware_setup(void)
{
int cpu;
struct page *iopm_pages;
- struct page *msrpm_pages;
- void *iopm_va, *msrpm_va;
+ void *iopm_va;
int r;
iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
@@ -379,41 +424,33 @@ static __init int svm_hardware_setup(void)
clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
- msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ for_each_online_cpu(cpu) {
+ r = svm_cpu_init(cpu);
+ if (r)
+ goto err;
+ }
- r = -ENOMEM;
- if (!msrpm_pages)
- goto err_1;
+ svm_features = cpuid_edx(SVM_CPUID_FUNC);
- msrpm_va = page_address(msrpm_pages);
- memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
- msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
+ if (!svm_has(SVM_FEATURE_NPT))
+ npt_enabled = false;
-#ifdef CONFIG_X86_64
- set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
-#endif
- set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
+ if (npt_enabled && !npt) {
+ printk(KERN_INFO "kvm: Nested Paging disabled\n");
+ npt_enabled = false;
+ }
- for_each_online_cpu(cpu) {
- r = svm_cpu_init(cpu);
- if (r)
- goto err_2;
+ if (npt_enabled) {
+ printk(KERN_INFO "kvm: Nested Paging enabled\n");
+ kvm_enable_tdp();
}
+
return 0;
-err_2:
- __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
- msrpm_base = 0;
-err_1:
+err:
__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
iopm_base = 0;
return r;
@@ -421,9 +458,8 @@ err_1:
static __exit void svm_hardware_unsetup(void)
{
- __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
- iopm_base = msrpm_base = 0;
+ iopm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
@@ -443,15 +479,14 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static void init_vmcb(struct vmcb *vmcb)
+static void init_vmcb(struct vcpu_svm *svm)
{
- struct vmcb_control_area *control = &vmcb->control;
- struct vmcb_save_area *save = &vmcb->save;
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ struct vmcb_save_area *save = &svm->vmcb->save;
control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
- INTERCEPT_CR4_MASK |
- INTERCEPT_CR8_MASK;
+ INTERCEPT_CR4_MASK;
control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
@@ -471,23 +506,13 @@ static void init_vmcb(struct vmcb *vmcb)
INTERCEPT_DR7_MASK;
control->intercept_exceptions = (1 << PF_VECTOR) |
- (1 << UD_VECTOR);
+ (1 << UD_VECTOR) |
+ (1 << MC_VECTOR);
control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
- /*
- * selective cr0 intercept bug?
- * 0: 0f 22 d8 mov %eax,%cr3
- * 3: 0f 20 c0 mov %cr0,%eax
- * 6: 0d 00 00 00 80 or $0x80000000,%eax
- * b: 0f 22 c0 mov %eax,%cr0
- * set cr3 ->interception
- * get cr0 ->interception
- * set cr0 -> no interception
- */
- /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
(1ULL << INTERCEPT_CPUID) |
(1ULL << INTERCEPT_INVD) |
(1ULL << INTERCEPT_HLT) |
@@ -508,7 +533,7 @@ static void init_vmcb(struct vmcb *vmcb)
(1ULL << INTERCEPT_MWAIT);
control->iopm_base_pa = iopm_base;
- control->msrpm_base_pa = msrpm_base;
+ control->msrpm_base_pa = __pa(svm->msrpm);
control->tsc_offset = 0;
control->int_ctl = V_INTR_MASKING_MASK;
@@ -550,13 +575,30 @@ static void init_vmcb(struct vmcb *vmcb)
save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
+
+ if (npt_enabled) {
+ /* Setup VMCB for Nested Paging */
+ control->nested_ctl = 1;
+ control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
+ control->intercept_exceptions &= ~(1 << PF_VECTOR);
+ control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
+ INTERCEPT_CR3_MASK);
+ control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
+ INTERCEPT_CR3_MASK);
+ save->g_pat = 0x0007040600070406ULL;
+		/* enable caching because the QEMU BIOS doesn't enable it */
+ save->cr0 = X86_CR0_ET;
+ save->cr3 = 0;
+ save->cr4 = 0;
+ }
+ force_new_asid(&svm->vcpu);
}
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
if (vcpu->vcpu_id != 0) {
svm->vmcb->save.rip = 0;
@@ -571,6 +613,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
struct vcpu_svm *svm;
struct page *page;
+ struct page *msrpm_pages;
int err;
svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -589,12 +632,19 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
goto uninit;
}
+ err = -ENOMEM;
+ msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!msrpm_pages)
+ goto uninit;
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
memset(svm->db_regs, 0, sizeof(svm->db_regs));
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
fx_init(&svm->vcpu);
svm->vcpu.fpu_active = 1;
@@ -617,6 +667,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+ __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
@@ -731,6 +782,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->unusable = !var->present;
}
+static int svm_get_cpl(struct kvm_vcpu *vcpu)
+{
+ struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
+
+ return save->cpl;
+}
+
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -784,6 +842,9 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
}
#endif
+ if (npt_enabled)
+ goto set;
+
if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
vcpu->fpu_active = 1;
@@ -791,18 +852,29 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu->arch.cr0 = cr0;
cr0 |= X86_CR0_PG | X86_CR0_WP;
- cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
if (!vcpu->fpu_active) {
svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
cr0 |= X86_CR0_TS;
}
+set:
+ /*
+	 * re-enable caching here because the QEMU BIOS
+	 * does not do it; otherwise reboot is noticeably
+	 * delayed
+ */
+ cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- vcpu->arch.cr4 = cr4;
- to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+ unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+
+ vcpu->arch.cr4 = cr4;
+ if (!npt_enabled)
+ cr4 |= X86_CR4_PAE;
+ cr4 |= host_cr4_mce;
+ to_svm(vcpu)->vmcb->save.cr4 = cr4;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -833,13 +905,6 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
}
-/* FIXME:
-
- svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
- svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
-
-*/
-
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
return -EOPNOTSUPP;
@@ -920,7 +985,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
}
default:
printk(KERN_DEBUG "%s: unexpected dr %u\n",
- __FUNCTION__, dr);
+ __func__, dr);
*exception = UD_VECTOR;
return;
}
@@ -962,6 +1027,19 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
return 1;
}
+static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ /*
+ * On an #MC intercept the MCE handler is not called automatically in
+ * the host. So do it by hand here.
+ */
+	asm volatile ("int $0x12\n");
+ /* not sure if we ever come back to this point */
+
+ return 1;
+}
+
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
/*
@@ -969,7 +1047,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
* so reinitialize it.
*/
clear_page(svm->vmcb);
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
@@ -1033,9 +1111,18 @@ static int invalid_op_interception(struct vcpu_svm *svm,
static int task_switch_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- return 0;
+ u16 tss_selector;
+
+ tss_selector = (u16)svm->vmcb->control.exit_info_1;
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
+ return kvm_task_switch(&svm->vcpu, tss_selector,
+ TASK_SWITCH_IRET);
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
+ return kvm_task_switch(&svm->vcpu, tss_selector,
+ TASK_SWITCH_JMP);
+ return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}
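
Aside: exit_info_2 encodes why the task switch happened in two single-bit flags (added to svm.h later in this patch). The same decode as a standalone sketch:

#include <stdint.h>

#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP  38

enum ts_reason { TS_CALL, TS_IRET, TS_JMP };

static enum ts_reason decode_ts_reason(uint64_t exit_info_2)
{
	if (exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return TS_IRET;
	if (exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return TS_JMP;
	return TS_CALL;	/* CALL, exception, or interrupt gate */
}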
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
@@ -1049,7 +1136,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
- pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
+ pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
return 1;
}
@@ -1179,8 +1266,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm->vmcb->save.sysenter_esp = data;
break;
case MSR_IA32_DEBUGCTLMSR:
- pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
- __FUNCTION__, data);
+ if (!svm_has(SVM_FEATURE_LBRV)) {
+ pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
+ __func__, data);
+ break;
+ }
+ if (data & DEBUGCTL_RESERVED_BITS)
+ return 1;
+
+ svm->vmcb->save.dbgctl = data;
+ if (data & (1ULL<<0))
+ svm_enable_lbrv(svm);
+ else
+ svm_disable_lbrv(svm);
break;
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
@@ -1265,6 +1363,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
[SVM_EXIT_INTR] = nop_on_interception,
[SVM_EXIT_NMI] = nop_on_interception,
[SVM_EXIT_SMI] = nop_on_interception,
@@ -1290,14 +1389,34 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_WBINVD] = emulate_on_interception,
[SVM_EXIT_MONITOR] = invalid_op_interception,
[SVM_EXIT_MWAIT] = invalid_op_interception,
+ [SVM_EXIT_NPF] = pf_interception,
};
-
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 exit_code = svm->vmcb->control.exit_code;
+ if (npt_enabled) {
+ int mmu_reload = 0;
+ if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+ svm_set_cr0(vcpu, svm->vmcb->save.cr0);
+ mmu_reload = 1;
+ }
+ vcpu->arch.cr0 = svm->vmcb->save.cr0;
+ vcpu->arch.cr3 = svm->vmcb->save.cr3;
+ if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+ if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ }
+ if (mmu_reload) {
+ kvm_mmu_reset_context(vcpu);
+ kvm_mmu_load(vcpu);
+ }
+ }
+
kvm_reput_irq(svm);
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
@@ -1308,10 +1427,11 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
- exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
+ exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
+ exit_code != SVM_EXIT_NPF)
printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
"exit_code 0x%x\n",
- __FUNCTION__, svm->vmcb->control.exit_int_info,
+ __func__, svm->vmcb->control.exit_int_info,
exit_code);
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1364,6 +1484,27 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
svm_inject_irq(svm, irq);
}
+static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb = svm->vmcb;
+ int max_irr, tpr;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
+ return;
+
+ vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+
+ max_irr = kvm_lapic_find_highest_irr(vcpu);
+ if (max_irr == -1)
+ return;
+
+ tpr = kvm_lapic_get_cr8(vcpu) << 4;
+
+ if (tpr >= (max_irr & 0xf0))
+ vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
+
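
Aside: CR8 mirrors TPR bits 7:4, hence the shift by 4; the intercept is only re-armed when the current task priority actually blocks the highest pending vector, so most guest CR8 writes stay exit-free. The predicate in isolation:

#include <stdbool.h>

/* cr8: 4-bit task priority; max_irr: highest pending vector, -1 if none */
static bool need_cr8_intercept(int cr8, int max_irr)
{
	int tpr = cr8 << 4;

	if (max_irr == -1)
		return false;
	/* delivery compares priority classes (bits 7:4) only */
	return tpr >= (max_irr & 0xf0);	/* blocked: trap CR8 writes */
}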
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1376,14 +1517,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VEC_MASK;
vmcb->control.exit_int_info = 0;
svm_inject_irq(svm, intr_vector);
- return;
+ goto out;
}
if (vmcb->control.int_ctl & V_IRQ_MASK)
- return;
+ goto out;
if (!kvm_cpu_has_interrupt(vcpu))
- return;
+ goto out;
if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
@@ -1391,12 +1532,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
/* unable to deliver irq, set pending irq */
vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
svm_inject_irq(svm, 0x0);
- return;
+ goto out;
}
/* Okay, we can deliver the interrupt: grab it and update PIC state. */
intr_vector = kvm_cpu_get_interrupt(vcpu);
svm_inject_irq(svm, intr_vector);
kvm_timer_intr_post(vcpu, intr_vector);
+out:
+ update_cr8_intercept(vcpu);
}
static void kvm_reput_irq(struct vcpu_svm *svm)
@@ -1482,6 +1625,29 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
+static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+ int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
+ kvm_lapic_set_tpr(vcpu, cr8);
+ }
+}
+
+static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 cr8;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ cr8 = kvm_get_cr8(vcpu);
+ svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
+ svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1491,6 +1657,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
pre_svm_run(svm);
+ sync_lapic_to_cr8(vcpu);
+
save_host_msrs(vcpu);
fs_selector = read_fs();
gs_selector = read_gs();
@@ -1499,6 +1667,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
svm->host_dr6 = read_dr6();
svm->host_dr7 = read_dr7();
svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ /* required for live migration with NPT */
+ if (npt_enabled)
+ svm->vmcb->save.cr3 = vcpu->arch.cr3;
if (svm->vmcb->save.dr7 & 0xff) {
write_dr7(0);
@@ -1635,6 +1806,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
stgi();
+ sync_cr8_to_lapic(vcpu);
+
svm->next_rip = 0;
}
@@ -1642,6 +1815,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (npt_enabled) {
+ svm->vmcb->control.nested_cr3 = root;
+ force_new_asid(vcpu);
+ return;
+ }
+
svm->vmcb->save.cr3 = root;
force_new_asid(vcpu);
@@ -1709,6 +1888,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.get_segment_base = svm_get_segment_base,
.get_segment = svm_get_segment,
.set_segment = svm_set_segment,
+ .get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h
index 5fd50491b555..1b8afa78e869 100644
--- a/arch/x86/kvm/svm.h
+++ b/arch/x86/kvm/svm.h
@@ -238,6 +238,9 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
diff --git a/arch/x86/kvm/tss.h b/arch/x86/kvm/tss.h
new file mode 100644
index 000000000000..622aa10f692f
--- /dev/null
+++ b/arch/x86/kvm/tss.h
@@ -0,0 +1,59 @@
+#ifndef __TSS_SEGMENT_H
+#define __TSS_SEGMENT_H
+
+struct tss_segment_32 {
+ u32 prev_task_link;
+ u32 esp0;
+ u32 ss0;
+ u32 esp1;
+ u32 ss1;
+ u32 esp2;
+ u32 ss2;
+ u32 cr3;
+ u32 eip;
+ u32 eflags;
+ u32 eax;
+ u32 ecx;
+ u32 edx;
+ u32 ebx;
+ u32 esp;
+ u32 ebp;
+ u32 esi;
+ u32 edi;
+ u32 es;
+ u32 cs;
+ u32 ss;
+ u32 ds;
+ u32 fs;
+ u32 gs;
+ u32 ldt_selector;
+ u16 t;
+ u16 io_map;
+};
+
+struct tss_segment_16 {
+ u16 prev_task_link;
+ u16 sp0;
+ u16 ss0;
+ u16 sp1;
+ u16 ss1;
+ u16 sp2;
+ u16 ss2;
+ u16 ip;
+ u16 flag;
+ u16 ax;
+ u16 cx;
+ u16 dx;
+ u16 bx;
+ u16 sp;
+ u16 bp;
+ u16 si;
+ u16 di;
+ u16 es;
+ u16 cs;
+ u16 ss;
+ u16 ds;
+ u16 ldt;
+};
+
+#endif
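
Aside: these structs mirror the hardware 32-bit and 16-bit TSS layouts; the task-switch emulation (kvm_task_switch(), outside this excerpt) copies them between guest memory and local storage rather than letting hardware switch tasks. A hedged sketch of the load half, assuming the existing kvm_read_guest() helper:

/* sketch only: error handling and the store-back half are omitted */
static int load_guest_tss32(struct kvm_vcpu *vcpu, gpa_t tss_base,
			    struct tss_segment_32 *tss)
{
	return kvm_read_guest(vcpu->kvm, tss_base, tss,
			      sizeof(struct tss_segment_32));
}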
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e1462880d1f..8e5d6645b90d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -17,7 +17,6 @@
#include "irq.h"
#include "vmx.h"
-#include "segment_descriptor.h"
#include "mmu.h"
#include <linux/kvm_host.h>
@@ -37,6 +36,12 @@ MODULE_LICENSE("GPL");
static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);
+static int enable_vpid = 1;
+module_param(enable_vpid, bool, 0);
+
+static int flexpriority_enabled = 1;
+module_param(flexpriority_enabled, bool, 0);
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -71,6 +76,7 @@ struct vcpu_vmx {
unsigned rip;
} irq;
} rmode;
+ int vpid;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -85,6 +91,10 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
int size;
@@ -176,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+ return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
static inline int cpu_has_vmx_tpr_shadow(void)
{
return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -194,8 +209,9 @@ static inline int cpu_has_secondary_exec_ctrls(void)
static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
- return (vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ return flexpriority_enabled
+ && (vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}
static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
@@ -204,6 +220,12 @@ static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
(irqchip_in_kernel(kvm)));
}
+static inline int cpu_has_vmx_vpid(void)
+{
+ return (vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_VPID);
+}
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -214,6 +236,20 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
+static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+{
+ struct {
+ u64 vpid : 16;
+ u64 rsvd : 48;
+ u64 gva;
+ } operand = { vpid, 0, gva };
+
+ asm volatile (ASM_VMX_INVVPID
+ /* CF==1 or ZF==1 --> rc = -1 */
+ "; ja 1f ; ud2 ; 1:"
+ : : "a"(&operand), "c"(ext) : "cc", "memory");
+}
+
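
Aside: INVVPID takes a 128-bit in-memory operand — the VPID in bits 15:0 of the first quadword, a guest-linear address in the second — which is exactly what the bitfield struct encodes. A user-space check of that layout:

#include <stdio.h>
#include <stdint.h>

struct invvpid_operand {
	uint64_t vpid : 16;
	uint64_t rsvd : 48;	/* must be zero */
	uint64_t gva;
};

int main(void)
{
	struct invvpid_operand op = { .vpid = 1, .rsvd = 0, .gva = 0 };

	/* the ISA expects a 16-byte operand */
	printf("sizeof = %zu\n", sizeof(op));
	return 0;
}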
static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -257,6 +293,14 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
vmx->launched = 0;
}
+static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
+{
+ if (vmx->vpid == 0)
+ return;
+
+ __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+}
+
static unsigned long vmcs_readl(unsigned long field)
{
unsigned long value;
@@ -353,7 +397,7 @@ static void reload_tss(void)
* VT restores TR but not its size. Useless.
*/
struct descriptor_table gdt;
- struct segment_descriptor *descs;
+ struct desc_struct *descs;
get_gdt(&gdt);
descs = (void *)gdt.base;
@@ -485,11 +529,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(vmx->vmcs);
- u64 tsc_this, delta;
+ u64 tsc_this, delta, new_offset;
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
kvm_migrate_apic_timer(vcpu);
+ vpid_sync_vcpu_all(vmx);
}
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -524,8 +569,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* Make sure the time stamp counter is monotonous.
*/
rdtscll(tsc_this);
- delta = vcpu->arch.host_tsc - tsc_this;
- vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+ if (tsc_this < vcpu->arch.host_tsc) {
+ delta = vcpu->arch.host_tsc - tsc_this;
+ new_offset = vmcs_read64(TSC_OFFSET) + delta;
+ vmcs_write64(TSC_OFFSET, new_offset);
+ }
}
}
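
Aside: the old code always re-based the offset so the guest resumed at exactly its departure TSC, even when the destination CPU's TSC ran ahead; the new code adjusts only when the destination TSC is behind the departure value — the one case where the guest clock could otherwise go backwards. Numerically:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t host_tsc = 1000000;	/* TSC when the vCPU left its old CPU */
	uint64_t tsc_this = 400000;	/* TSC on the destination CPU */
	uint64_t offset = 0;

	if (tsc_this < host_tsc)	/* only ever push the offset forward */
		offset += host_tsc - tsc_this;

	/* guest now reads tsc_this + offset = 1000000: monotonic */
	printf("guest TSC %llu\n", (unsigned long long)(tsc_this + offset));
	return 0;
}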
@@ -596,7 +644,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
{
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
nr | INTR_TYPE_EXCEPTION
- | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+ | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
| INTR_INFO_VALID_MASK);
if (has_error_code)
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -959,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING;
opt = CPU_BASED_TPR_SHADOW |
+ CPU_BASED_USE_MSR_BITMAPS |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
@@ -971,7 +1020,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
min = 0;
opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_WBINVD_EXITING;
+ SECONDARY_EXEC_WBINVD_EXITING |
+ SECONDARY_EXEC_ENABLE_VPID;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
return -EIO;
@@ -1080,6 +1130,10 @@ static __init int hardware_setup(void)
{
if (setup_vmcs_config(&vmcs_config) < 0)
return -EIO;
+
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
return alloc_kvm_area();
}
@@ -1214,7 +1268,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
- __FUNCTION__);
+ __func__);
vmcs_write32(GUEST_TR_AR_BYTES,
(guest_tr_ar & ~AR_TYPE_MASK)
| AR_TYPE_BUSY_64_TSS);
@@ -1239,6 +1293,11 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ vpid_sync_vcpu_all(to_vmx(vcpu));
+}
+
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
@@ -1275,6 +1334,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+ vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, cr3);
if (vcpu->arch.cr0 & X86_CR0_PE)
vmx_fpu_deactivate(vcpu);
@@ -1288,14 +1348,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
}
-#ifdef CONFIG_X86_64
-
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vcpu->arch.shadow_efer = efer;
+ if (!msr)
+ return;
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1312,8 +1372,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
setup_msrs(vmx);
}
-#endif
-
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1344,6 +1402,20 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->unusable = (ar >> 16) & 1;
}
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+ return 0;
+
+ if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
+ return 3;
+
+ vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
+ return kvm_seg.selector & 3;
+}
+
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
@@ -1433,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
int ret = 0;
int r;
- down_read(&kvm->slots_lock);
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
goto out;
@@ -1456,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
ret = 1;
out:
- up_read(&kvm->slots_lock);
return ret;
}
@@ -1494,6 +1564,46 @@ out:
return r;
}
+static void allocate_vpid(struct vcpu_vmx *vmx)
+{
+ int vpid;
+
+ vmx->vpid = 0;
+ if (!enable_vpid || !cpu_has_vmx_vpid())
+ return;
+ spin_lock(&vmx_vpid_lock);
+ vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+ if (vpid < VMX_NR_VPIDS) {
+ vmx->vpid = vpid;
+ __set_bit(vpid, vmx_vpid_bitmap);
+ }
+ spin_unlock(&vmx_vpid_lock);
+}
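
Aside: VPIDs come from a single global bitmap under vmx_vpid_lock, with VPID 0 reserved (it doubles as "no VPID assigned", falling back to full TLB flushes). The matching release path is not part of this excerpt; a plausible counterpart, sketched under the same locking (name assumed):

static void free_vpid(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;
	spin_lock(&vmx_vpid_lock);
	__clear_bit(vmx->vpid, vmx_vpid_bitmap);
	vmx->vpid = 0;
	spin_unlock(&vmx_vpid_lock);
}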
+
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+ void *va;
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ va = kmap(msr_bitmap);
+ if (msr <= 0x1fff) {
+ __clear_bit(msr, va + 0x000); /* read-low */
+ __clear_bit(msr, va + 0x800); /* write-low */
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ __clear_bit(msr, va + 0x400); /* read-high */
+ __clear_bit(msr, va + 0xc00); /* write-high */
+ }
+ kunmap(msr_bitmap);
+}
+
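
Aside: a plausible call site, not shown in this excerpt, would clear pass-through bits in the global bitmap at module-init time for MSRs that are safe to hand the guest directly, e.g.:

/* sketch: assumes vmx_msr_bitmap was allocated during module init */
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);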
/*
* Sets up the vmcs for emulated real mode.
*/
@@ -1511,6 +1621,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
/* Control */
@@ -1532,6 +1645,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (vmx->vpid == 0)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -1613,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
u64 msr;
int ret;
+ down_read(&vcpu->kvm->slots_lock);
if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
@@ -1621,7 +1737,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.rmode.active = 0;
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
- set_cr8(&vmx->vcpu, 0);
+ kvm_set_cr8(&vmx->vcpu, 0);
msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (vmx->vcpu.vcpu_id == 0)
msr |= MSR_IA32_APICBASE_BSP;
@@ -1704,18 +1820,22 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+ if (vmx->vpid != 0)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
vmx->vcpu.arch.cr0 = 0x60000010;
vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
vmx_set_efer(&vmx->vcpu, 0);
-#endif
vmx_fpu_activate(&vmx->vcpu);
update_exception_bitmap(&vmx->vcpu);
- return 0;
+ vpid_sync_vcpu_all(vmx);
+
+ ret = 0;
out:
+ up_read(&vcpu->kvm->slots_lock);
return ret;
}
@@ -1723,6 +1843,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+
if (vcpu->arch.rmode.active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = irq;
@@ -1844,7 +1966,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!is_page_fault(intr_info))
printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
- "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+ "intr info 0x%x\n", __func__, vect_info, intr_info);
if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1869,10 +1991,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
error_code = 0;
rip = vmcs_readl(GUEST_RIP);
- if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
+ if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
if (is_page_fault(intr_info)) {
cr2 = vmcs_readl(EXIT_QUALIFICATION);
+ KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
+ (u32)((u64)cr2 >> 32), handler);
return kvm_mmu_page_fault(vcpu, cr2, error_code);
}
@@ -1901,6 +2025,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
++vcpu->stat.irq_exits;
+ KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
return 1;
}
@@ -1958,25 +2083,27 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
+ (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
switch (cr) {
case 0:
vcpu_load_rsp_rip(vcpu);
- set_cr0(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 3:
vcpu_load_rsp_rip(vcpu);
- set_cr3(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 4:
vcpu_load_rsp_rip(vcpu);
- set_cr4(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 8:
vcpu_load_rsp_rip(vcpu);
- set_cr8(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
@@ -1990,6 +2117,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
vmx_fpu_activate(vcpu);
+ KVMTRACE_0D(CLTS, vcpu, handler);
skip_emulated_instruction(vcpu);
return 1;
case 1: /* mov from cr */
@@ -1998,18 +2126,24 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu_load_rsp_rip(vcpu);
vcpu->arch.regs[reg] = vcpu->arch.cr3;
vcpu_put_rsp_rip(vcpu);
+ KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
+ (u32)vcpu->arch.regs[reg],
+ (u32)((u64)vcpu->arch.regs[reg] >> 32),
+ handler);
skip_emulated_instruction(vcpu);
return 1;
case 8:
vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = get_cr8(vcpu);
+ vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
vcpu_put_rsp_rip(vcpu);
+ KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
+ (u32)vcpu->arch.regs[reg], handler);
skip_emulated_instruction(vcpu);
return 1;
}
break;
case 3: /* lmsw */
- lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+ kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
skip_emulated_instruction(vcpu);
return 1;
@@ -2049,6 +2183,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
val = 0;
}
vcpu->arch.regs[reg] = val;
+ KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
/* mov to dr */
}
@@ -2073,6 +2208,9 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+ KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
+ handler);
+
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
@@ -2086,6 +2224,9 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
+ handler);
+
if (vmx_set_msr(vcpu, ecx, data) != 0) {
kvm_inject_gp(vcpu, 0);
return 1;
@@ -2110,6 +2251,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+ KVMTRACE_0D(PEND_INTR, vcpu, handler);
+
/*
* If userspace is waiting to inject interrupts, exit as soon as
* possible.
@@ -2152,6 +2296,8 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
offset = exit_qualification & 0xffful;
+ KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler);
+
er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
if (er != EMULATE_DONE) {
@@ -2163,6 +2309,20 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ unsigned long exit_qualification;
+ u16 tss_selector;
+ int reason;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ reason = (u32)exit_qualification >> 30;
+ tss_selector = exit_qualification;
+
+ return kvm_task_switch(vcpu, tss_selector, reason);
+}
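+
The exit qualification decoded above follows the SDM layout for task-switch exits: bits 15:0 hold the incoming TSS selector and bits 31:30 the switch source. A small sketch, assuming the TASK_SWITCH_* reason values consumed by kvm_task_switch() match the SDM encoding (0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate):

#include <stdint.h>

static const char *task_switch_source(uint64_t exit_qualification)
{
	switch ((uint32_t)exit_qualification >> 30) {
	case 0:  return "CALL instruction";
	case 1:  return "IRET instruction";
	case 2:  return "JMP instruction";
	default: return "task gate in the IDT";
	}
}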
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -2185,6 +2345,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
+ [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
};
static const int kvm_vmx_max_exit_handlers =
@@ -2200,6 +2361,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vectoring_info = vmx->idt_vectoring_info;
+ KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
+ (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+
if (unlikely(vmx->fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2210,7 +2374,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
exit_reason != EXIT_REASON_EXCEPTION_NMI)
printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
- "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+ "exit reason is 0x%x\n", __func__, exit_reason);
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -2221,10 +2385,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return 0;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-}
-
static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -2285,11 +2445,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
return;
}
+ KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
+
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
- if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+ if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs_read32(IDT_VECTORING_ERROR_CODE));
if (unlikely(has_ext_irq))
@@ -2470,8 +2632,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
/* We need to handle NMIs before interrupts are enabled */
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
+ KVMTRACE_0D(NMI, vcpu, handler);
asm("int $2");
+ }
}
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -2489,6 +2653,10 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
vmx_free_vmcs(vcpu);
kfree(vmx->host_msrs);
kfree(vmx->guest_msrs);
@@ -2505,6 +2673,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx)
return ERR_PTR(-ENOMEM);
+ allocate_vpid(vmx);
+
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
goto free_vcpu;
@@ -2591,14 +2761,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
+ .get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr3 = vmx_set_cr3,
.set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
.set_efer = vmx_set_efer,
-#endif
.get_idt = vmx_get_idt,
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
@@ -2626,7 +2795,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
static int __init vmx_init(void)
{
- void *iova;
+ void *va;
int r;
vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2639,28 +2808,48 @@ static int __init vmx_init(void)
goto out;
}
+ vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_msr_bitmap) {
+ r = -ENOMEM;
+ goto out1;
+ }
+
/*
* Allow direct access to the PC debug port (it is often used for I/O
* delays, but the vmexits simply slow things down).
*/
- iova = kmap(vmx_io_bitmap_a);
- memset(iova, 0xff, PAGE_SIZE);
- clear_bit(0x80, iova);
+ va = kmap(vmx_io_bitmap_a);
+ memset(va, 0xff, PAGE_SIZE);
+ clear_bit(0x80, va);
kunmap(vmx_io_bitmap_a);
- iova = kmap(vmx_io_bitmap_b);
- memset(iova, 0xff, PAGE_SIZE);
+ va = kmap(vmx_io_bitmap_b);
+ memset(va, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
+ va = kmap(vmx_msr_bitmap);
+ memset(va, 0xff, PAGE_SIZE);
+ kunmap(vmx_msr_bitmap);
+
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out1;
+ goto out2;
+
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
if (bypass_guest_pf)
kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
return 0;
+out2:
+ __free_page(vmx_msr_bitmap);
out1:
__free_page(vmx_io_bitmap_b);
out:
@@ -2670,6 +2859,7 @@ out:
static void __exit vmx_exit(void)
{
+ __free_page(vmx_msr_bitmap);
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index d52ae8d7303d..5dff4606b988 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -49,6 +49,7 @@
* Definitions of Secondary Processor-Based VM-Execution Controls.
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
@@ -65,6 +66,7 @@
/* VMCS Encodings */
enum vmcs_field {
+ VIRTUAL_PROCESSOR_ID = 0x00000000,
GUEST_ES_SELECTOR = 0x00000800,
GUEST_CS_SELECTOR = 0x00000802,
GUEST_SS_SELECTOR = 0x00000804,
@@ -231,12 +233,12 @@ enum vmcs_field {
*/
#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
-#define INTR_INFO_DELIEVER_CODE_MASK 0x800 /* 11 */
+#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
-#define VECTORING_INFO_DELIEVER_CODE_MASK INTR_INFO_DELIEVER_CODE_MASK
+#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
@@ -321,4 +323,8 @@ enum vmcs_field {
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9
+#define VMX_NR_VPIDS (1 << 16)
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
+#define VMX_VPID_EXTENT_ALL_CONTEXT 2
+
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b01552bd1f1..0ce556372a4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -15,10 +15,12 @@
*/
#include <linux/kvm_host.h>
-#include "segment_descriptor.h"
#include "irq.h"
#include "mmu.h"
+#include "i8254.h"
+#include "tss.h"
+#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
@@ -28,6 +30,7 @@
#include <asm/uaccess.h>
#include <asm/msr.h>
+#include <asm/desc.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
@@ -41,7 +44,15 @@
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
+/* EFER defaults:
+ * - enable SYSCALL by default because it is emulated by KVM
+ * - enable LME and LMA by default on 64-bit KVM
+ */
+#ifdef CONFIG_X86_64
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+#else
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -63,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ { "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
@@ -78,6 +90,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_recycled", VM_STAT(mmu_recycled) },
{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+ { "largepages", VM_STAT(lpages) },
{ NULL }
};
@@ -85,7 +98,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
- struct segment_descriptor *d;
+ struct desc_struct *d;
unsigned long table_base;
unsigned long v;
@@ -101,13 +114,12 @@ unsigned long segment_base(u16 selector)
asm("sldt %0" : "=g"(ldt_selector));
table_base = segment_base(ldt_selector);
}
- d = (struct segment_descriptor *)(table_base + (selector & ~7));
- v = d->base_low | ((unsigned long)d->base_mid << 16) |
- ((unsigned long)d->base_high << 24);
+ d = (struct desc_struct *)(table_base + (selector & ~7));
+ v = d->base0 | ((unsigned long)d->base1 << 16) |
+ ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
- if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
- v |= ((unsigned long) \
- ((struct segment_descriptor_64 *)d)->base_higher) << 32;
+ if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+ v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
return v;
}
@@ -145,11 +157,16 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
++vcpu->stat.pf_guest;
- if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
- printk(KERN_DEBUG "kvm: inject_page_fault:"
- " double fault 0x%lx\n", addr);
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
+ if (vcpu->arch.exception.pending) {
+ if (vcpu->arch.exception.nr == PF_VECTOR) {
+ printk(KERN_DEBUG "kvm: inject_page_fault:"
+ " double fault 0x%lx\n", addr);
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ } else if (vcpu->arch.exception.nr == DF_VECTOR) {
+ /* triple fault -> shutdown */
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ }
return;
}
vcpu->arch.cr2 = addr;
@@ -184,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
int ret;
u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
- down_read(&vcpu->kvm->slots_lock);
ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte));
if (ret < 0) {
@@ -201,10 +217,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
- up_read(&vcpu->kvm->slots_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
@@ -215,18 +231,16 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
- down_read(&vcpu->kvm->slots_lock);
r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
- up_read(&vcpu->kvm->slots_lock);
return changed;
}
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (cr0 & CR0_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
@@ -284,15 +298,18 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
kvm_mmu_reset_context(vcpu);
return;
}
-EXPORT_SYMBOL_GPL(set_cr0);
+EXPORT_SYMBOL_GPL(kvm_set_cr0);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ KVMTRACE_1D(LMSW, vcpu,
+ (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
+ handler);
}
-EXPORT_SYMBOL_GPL(lmsw);
+EXPORT_SYMBOL_GPL(kvm_lmsw);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
if (cr4 & CR4_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
@@ -323,9 +340,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
kvm_mmu_reset_context(vcpu);
}
-EXPORT_SYMBOL_GPL(set_cr4);
+EXPORT_SYMBOL_GPL(kvm_set_cr4);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
kvm_mmu_flush_tlb(vcpu);
@@ -359,7 +376,6 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
}
- down_read(&vcpu->kvm->slots_lock);
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
@@ -375,11 +391,10 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu->arch.cr3 = cr3;
vcpu->arch.mmu.new_cr3(vcpu);
}
- up_read(&vcpu->kvm->slots_lock);
}
-EXPORT_SYMBOL_GPL(set_cr3);
+EXPORT_SYMBOL_GPL(kvm_set_cr3);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
@@ -391,16 +406,16 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
else
vcpu->arch.cr8 = cr8;
}
-EXPORT_SYMBOL_GPL(set_cr8);
+EXPORT_SYMBOL_GPL(kvm_set_cr8);
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
return kvm_lapic_get_cr8(vcpu);
else
return vcpu->arch.cr8;
}
-EXPORT_SYMBOL_GPL(get_cr8);
+EXPORT_SYMBOL_GPL(kvm_get_cr8);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -415,7 +430,8 @@ static u32 msrs_to_save[] = {
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TIME_STAMP_COUNTER,
+ MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_IA32_PERF_STATUS,
};
static unsigned num_msrs_to_save;
@@ -424,11 +440,9 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
};
-#ifdef CONFIG_X86_64
-
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & EFER_RESERVED_BITS) {
+ if (efer & efer_reserved_bits) {
printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
efer);
kvm_inject_gp(vcpu, 0);
@@ -450,7 +464,12 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu->arch.shadow_efer = efer;
}
-#endif
+void kvm_enable_efer_bits(u64 mask)
+{
+ efer_reserved_bits &= ~mask;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
/*
* Writes msr value into the appropriate "register".
@@ -470,26 +489,86 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return kvm_set_msr(vcpu, index, *data);
}
+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+{
+ static int version;
+ struct kvm_wall_clock wc;
+ struct timespec wc_ts;
+
+ if (!wall_clock)
+ return;
+
+ version++;
+
+ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+
+ wc_ts = current_kernel_time();
+ wc.wc_sec = wc_ts.tv_sec;
+ wc.wc_nsec = wc_ts.tv_nsec;
+ wc.wc_version = version;
+
+ kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+
+ version++;
+ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+}
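+
The version field written above acts as a seqlock: it is odd while the structure is being updated and even once the update is complete, so a guest rereads until it sees the same even value before and after. A guest-side sketch under that assumption (barrier() stands in for a compiler barrier):

#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct kvm_wall_clock {
	uint32_t wc_version;
	uint32_t wc_sec;
	uint32_t wc_nsec;
};

static void read_wall_clock(const volatile struct kvm_wall_clock *wc,
			    uint32_t *sec, uint32_t *nsec)
{
	uint32_t version;

	do {
		version = wc->wc_version;
		barrier();
		*sec  = wc->wc_sec;
		*nsec = wc->wc_nsec;
		barrier();
	} while ((version & 1) || version != wc->wc_version);
}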
+
+static void kvm_write_guest_time(struct kvm_vcpu *v)
+{
+ struct timespec ts;
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+ void *shared_kaddr;
+
+ if (!vcpu->time_page)
+ return;
+
+ /* Keep irqs disabled to prevent changes to the clock */
+ local_irq_save(flags);
+ kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
+ &vcpu->hv_clock.tsc_timestamp);
+ ktime_get_ts(&ts);
+ local_irq_restore(flags);
+
+ /* With all the info we got, fill in the values */
+
+ vcpu->hv_clock.system_time = ts.tv_nsec +
+ (NSEC_PER_SEC * (u64)ts.tv_sec);
+ /*
+ * The interface expects us to write an even number signaling that the
+ * update is finished. Since the guest won't see the intermediate
+ * state, we just write "2" at the end.
+ */
+ vcpu->hv_clock.version = 2;
+
+ shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+
+ memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
+
+ kunmap_atomic(shared_kaddr, KM_USER0);
+
+ mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+}
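+
On the guest side, the fields filled in here convert a raw TSC delta into nanoseconds: with tsc_shift fixed at 22 and tsc_to_system_mul = clocksource_khz2mult(tsc_khz, 22), the conversion is now = system_time + ((tsc - tsc_timestamp) * mul) >> 22. A minimal sketch (ignoring the 128-bit intermediate a real guest would need to avoid multiply overflow):

#include <stdint.h>

static uint64_t pvclock_now_ns(uint64_t tsc, uint64_t tsc_timestamp,
			       uint64_t system_time, uint32_t mul)
{
	uint64_t delta = tsc - tsc_timestamp;	/* cycles since the snapshot */

	return system_time + ((delta * mul) >> 22);
}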
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
-#ifdef CONFIG_X86_64
case MSR_EFER:
set_efer(vcpu, data);
break;
-#endif
case MSR_IA32_MC0_STATUS:
pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_MCG_STATUS:
pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_MCG_CTL:
pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
@@ -501,6 +580,42 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
+ case MSR_KVM_WALL_CLOCK:
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data);
+ break;
+ case MSR_KVM_SYSTEM_TIME: {
+ if (vcpu->arch.time_page) {
+ kvm_release_page_dirty(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+
+ vcpu->arch.time = data;
+
+ /* we verify if the enable bit is set... */
+ if (!(data & 1))
+ break;
+
+ /* ...but clean it before doing the actual write */
+ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
+ vcpu->arch.hv_clock.tsc_to_system_mul =
+ clocksource_khz2mult(tsc_khz, 22);
+ vcpu->arch.hv_clock.tsc_shift = 22;
+
+ down_read(&current->mm->mmap_sem);
+ vcpu->arch.time_page =
+ gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+ up_read(&current->mm->mmap_sem);
+
+ if (is_error_page(vcpu->arch.time_page)) {
+ kvm_release_page_clean(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+
+ kvm_write_guest_time(vcpu);
+ break;
+ }
default:
pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
return 1;
@@ -540,7 +655,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_MISC+12:
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_UCODE_REV:
- case MSR_IA32_PERF_STATUS:
case MSR_IA32_EBL_CR_POWERON:
/* MTRR registers */
case 0xfe:
@@ -556,11 +670,21 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
-#ifdef CONFIG_X86_64
+ case MSR_IA32_PERF_STATUS:
+ /* TSC increment by tick */
+ data = 1000ULL;
+ /* CPU multiplier */
+ data |= (((uint64_t)4ULL) << 40);
+ break;
case MSR_EFER:
data = vcpu->arch.shadow_efer;
break;
-#endif
+ case MSR_KVM_WALL_CLOCK:
+ data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ data = vcpu->arch.time;
+ break;
default:
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -584,9 +708,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
vcpu_load(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
+ up_read(&vcpu->kvm->slots_lock);
vcpu_put(vcpu);
@@ -688,11 +814,24 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
+ case KVM_CAP_CLOCKSOURCE:
+ case KVM_CAP_PIT:
+ case KVM_CAP_NOP_IO_DELAY:
+ case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
+ case KVM_CAP_NR_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_MEMORY_SLOTS;
+ break;
+ case KVM_CAP_PV_MMU:
+ r = !tdp_enabled;
+ break;
default:
r = 0;
break;
@@ -763,6 +902,7 @@ out:
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_write_guest_time(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -958,32 +1098,32 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
/* function 4 and 0xb have additional index. */
case 4: {
- int index, cache_type;
+ int i, cache_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until cache_type is zero */
- for (index = 1; *nent < maxnent; ++index) {
- cache_type = entry[index - 1].eax & 0x1f;
+ for (i = 1; *nent < maxnent; ++i) {
+ cache_type = entry[i - 1].eax & 0x1f;
if (!cache_type)
break;
- do_cpuid_1_ent(&entry[index], function, index);
- entry[index].flags |=
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
break;
}
case 0xb: {
- int index, level_type;
+ int i, level_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until level_type is zero */
- for (index = 1; *nent < maxnent; ++index) {
- level_type = entry[index - 1].ecx & 0xff;
+ for (i = 1; *nent < maxnent; ++i) {
+ level_type = entry[i - 1].ecx & 0xff;
if (!level_type)
break;
- do_cpuid_1_ent(&entry[index], function, index);
- entry[index].flags |=
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
@@ -1365,6 +1505,23 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
return r;
}
+static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ int r = 0;
+
+ memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
+ return r;
+}
+
+static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ int r = 0;
+
+ memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+ kvm_pit_load_count(kvm, 0, ps->channels[0].count);
+ return r;
+}
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -1457,6 +1614,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
} else
goto out;
break;
+ case KVM_CREATE_PIT:
+ r = -ENOMEM;
+ kvm->arch.vpit = kvm_create_pit(kvm);
+ if (kvm->arch.vpit)
+ r = 0;
+ break;
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -1512,6 +1675,37 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_PIT: {
+ struct kvm_pit_state ps;
+ r = -EFAULT;
+ if (copy_from_user(&ps, argp, sizeof ps))
+ goto out;
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+ goto out;
+ r = kvm_vm_ioctl_get_pit(kvm, &ps);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &ps, sizeof ps))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_PIT: {
+ struct kvm_pit_state ps;
+ r = -EFAULT;
+ if (copy_from_user(&ps, argp, sizeof ps))
+ goto out;
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+ goto out;
+ r = kvm_vm_ioctl_set_pit(kvm, &ps);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
default:
;
}
@@ -1570,7 +1764,6 @@ int emulator_read_std(unsigned long addr,
void *data = val;
int r = X86EMUL_CONTINUE;
- down_read(&vcpu->kvm->slots_lock);
while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
unsigned offset = addr & (PAGE_SIZE-1);
@@ -1592,7 +1785,6 @@ int emulator_read_std(unsigned long addr,
addr += tocopy;
}
out:
- up_read(&vcpu->kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1611,9 +1803,7 @@ static int emulator_read_emulated(unsigned long addr,
return X86EMUL_CONTINUE;
}
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&vcpu->kvm->slots_lock);
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1646,19 +1836,15 @@ mmio:
return X86EMUL_UNHANDLEABLE;
}
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
- const void *val, int bytes)
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes)
{
int ret;
- down_read(&vcpu->kvm->slots_lock);
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
- if (ret < 0) {
- up_read(&vcpu->kvm->slots_lock);
+ if (ret < 0)
return 0;
- }
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
- up_read(&vcpu->kvm->slots_lock);
return 1;
}
@@ -1670,9 +1856,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct kvm_io_device *mmio_dev;
gpa_t gpa;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&vcpu->kvm->slots_lock);
if (gpa == UNMAPPED_GVA) {
kvm_inject_page_fault(vcpu, addr, 2);
@@ -1749,7 +1933,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
char *kaddr;
u64 val;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
if (gpa == UNMAPPED_GVA ||
@@ -1769,9 +1952,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
kunmap_atomic(kaddr, KM_USER0);
kvm_release_page_dirty(page);
- emul_write:
- up_read(&vcpu->kvm->slots_lock);
}
+emul_write:
#endif
return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -1802,7 +1984,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
*dest = kvm_x86_ops->get_dr(vcpu, dr);
return X86EMUL_CONTINUE;
default:
- pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
+ pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
return X86EMUL_UNHANDLEABLE;
}
}
@@ -1840,7 +2022,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
-struct x86_emulate_ops emulate_ops = {
+static struct x86_emulate_ops emulate_ops = {
.read_std = emulator_read_std,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
@@ -2091,6 +2273,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.guest_page_offset = 0;
vcpu->arch.pio.rep = 0;
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+ KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+ else
+ KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+
kvm_x86_ops->cache_regs(vcpu);
memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->decache_regs(vcpu);
@@ -2129,6 +2318,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.guest_page_offset = offset_in_page(address);
vcpu->arch.pio.rep = rep;
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+ KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+ else
+ KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+
if (!count) {
kvm_x86_ops->skip_emulated_instruction(vcpu);
return 1;
@@ -2163,10 +2359,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
kvm_x86_ops->skip_emulated_instruction(vcpu);
for (i = 0; i < nr_pages; ++i) {
- down_read(&vcpu->kvm->slots_lock);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
vcpu->arch.pio.guest_pages[i] = page;
- up_read(&vcpu->kvm->slots_lock);
if (!page) {
kvm_inject_gp(vcpu, 0);
free_pio_guest_pages(vcpu);
@@ -2238,10 +2432,13 @@ void kvm_arch_exit(void)
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
+ KVMTRACE_0D(HLT, vcpu, handler);
if (irqchip_in_kernel(vcpu->kvm)) {
- vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+ vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ up_read(&vcpu->kvm->slots_lock);
kvm_vcpu_block(vcpu);
- if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+ down_read(&vcpu->kvm->slots_lock);
+ if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
} else {
@@ -2251,9 +2448,19 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
+ unsigned long a1)
+{
+ if (is_long_mode(vcpu))
+ return a0;
+ else
+ return a0 | ((gpa_t)a1 << 32);
+}
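+
hc_gpa() exists because a 32-bit guest cannot pass a 64-bit guest physical address in one register, so the address is split across two hypercall arguments. A guest-side sketch of the ABI consumed here -- number in eax/rax, arguments in ebx, ecx, edx, return value in eax/rax; "vmcall" is the Intel opcode, and AMD guests would use "vmmcall" (kvm_para.h's helpers pick the right one at runtime):

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

A 32-bit guest issuing KVM_HC_MMU_OP would then pass the buffer length in p1 and the low/high halves of the buffer address in p2/p3, which hc_gpa() reassembles.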
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
+ int r = 1;
kvm_x86_ops->cache_regs(vcpu);
@@ -2263,6 +2470,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a2 = vcpu->arch.regs[VCPU_REGS_RDX];
a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+ KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+
if (!is_long_mode(vcpu)) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
@@ -2275,13 +2484,17 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
+ case KVM_HC_MMU_OP:
+ r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
+ break;
default:
ret = -KVM_ENOSYS;
break;
}
vcpu->arch.regs[VCPU_REGS_RAX] = ret;
kvm_x86_ops->decache_regs(vcpu);
- return 0;
+ ++vcpu->stat.hypercalls;
+ return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
@@ -2329,7 +2542,7 @@ void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long *rflags)
{
- lmsw(vcpu, msw);
+ kvm_lmsw(vcpu, msw);
*rflags = kvm_x86_ops->get_rflags(vcpu);
}
@@ -2346,9 +2559,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
case 4:
return vcpu->arch.cr4;
case 8:
- return get_cr8(vcpu);
+ return kvm_get_cr8(vcpu);
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
return 0;
}
}
@@ -2358,23 +2571,23 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
{
switch (cr) {
case 0:
- set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+ kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
*rflags = kvm_x86_ops->get_rflags(vcpu);
break;
case 2:
vcpu->arch.cr2 = val;
break;
case 3:
- set_cr3(vcpu, val);
+ kvm_set_cr3(vcpu, val);
break;
case 4:
- set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+ kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
break;
case 8:
- set_cr8(vcpu, val & 0xfUL);
+ kvm_set_cr8(vcpu, val & 0xfUL);
break;
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
}
}
@@ -2447,6 +2660,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
}
kvm_x86_ops->decache_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
+ KVMTRACE_5D(CPUID, vcpu, function,
+ (u32)vcpu->arch.regs[VCPU_REGS_RAX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RBX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RCX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
@@ -2469,7 +2687,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = get_cr8(vcpu);
+ kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
@@ -2509,16 +2727,17 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
- if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->arch.sipi_vector);
kvm_lapic_reset(vcpu);
r = kvm_x86_ops->vcpu_reset(vcpu);
if (r)
return r;
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
+ down_read(&vcpu->kvm->slots_lock);
vapic_enter(vcpu);
preempted:
@@ -2526,6 +2745,10 @@ preempted:
kvm_x86_ops->guest_debug_pre(vcpu);
again:
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ kvm_mmu_unload(vcpu);
+
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto out;
@@ -2539,6 +2762,11 @@ again:
r = 0;
goto out;
}
+ if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
+ kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+ r = 0;
+ goto out;
+ }
}
kvm_inject_pending_timer_irqs(vcpu);
@@ -2557,6 +2785,14 @@ again:
goto out;
}
+ if (vcpu->requests)
+ if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+ local_irq_enable();
+ preempt_enable();
+ r = 1;
+ goto out;
+ }
+
if (signal_pending(current)) {
local_irq_enable();
preempt_enable();
@@ -2566,6 +2802,13 @@ again:
goto out;
}
+ vcpu->guest_mode = 1;
+ /*
+ * Make sure that guest_mode assignment won't happen after
+ * testing the pending IRQ vector bitmap.
+ */
+ smp_wmb();
+
if (vcpu->arch.exception.pending)
__queue_exception(vcpu);
else if (irqchip_in_kernel(vcpu->kvm))
@@ -2575,13 +2818,15 @@ again:
kvm_lapic_sync_to_vapic(vcpu);
- vcpu->guest_mode = 1;
+ up_read(&vcpu->kvm->slots_lock);
+
kvm_guest_enter();
if (vcpu->requests)
if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
kvm_x86_ops->tlb_flush(vcpu);
+ KVMTRACE_0D(VMENTRY, vcpu, entryexit);
kvm_x86_ops->run(vcpu, kvm_run);
vcpu->guest_mode = 0;
@@ -2601,6 +2846,8 @@ again:
preempt_enable();
+ down_read(&vcpu->kvm->slots_lock);
+
/*
* Profile KVM exit RIPs:
*/
@@ -2628,14 +2875,18 @@ again:
}
out:
+ up_read(&vcpu->kvm->slots_lock);
if (r > 0) {
kvm_resched(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
goto preempted;
}
post_kvm_run_save(vcpu, kvm_run);
+ down_read(&vcpu->kvm->slots_lock);
vapic_exit(vcpu);
+ up_read(&vcpu->kvm->slots_lock);
return r;
}
@@ -2647,7 +2898,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu_load(vcpu);
- if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
vcpu_put(vcpu);
return -EAGAIN;
@@ -2658,7 +2909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* re-sync apic's tpr */
if (!irqchip_in_kernel(vcpu->kvm))
- set_cr8(vcpu, kvm_run->cr8);
+ kvm_set_cr8(vcpu, kvm_run->cr8);
if (vcpu->arch.pio.cur_count) {
r = complete_pio(vcpu);
@@ -2670,9 +2921,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
+
+ down_read(&vcpu->kvm->slots_lock);
r = emulate_instruction(vcpu, kvm_run,
vcpu->arch.mmio_fault_cr2, 0,
EMULTYPE_NO_DECODE);
+ up_read(&vcpu->kvm->slots_lock);
if (r == EMULATE_DO_MMIO) {
/*
* Read-modify-write. Back to userspace.
@@ -2773,7 +3027,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
static void get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- return kvm_x86_ops->get_segment(vcpu, var, seg);
+ kvm_x86_ops->get_segment(vcpu, var, seg);
}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -2816,7 +3070,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = vcpu->arch.cr3;
sregs->cr4 = vcpu->arch.cr4;
- sregs->cr8 = get_cr8(vcpu);
+ sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.shadow_efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -2836,12 +3090,438 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_load(vcpu);
+ mp_state->mp_state = vcpu->arch.mp_state;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_load(vcpu);
+ vcpu->arch.mp_state = mp_state->mp_state;
+ vcpu_put(vcpu);
+ return 0;
+}
+
static void set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- return kvm_x86_ops->set_segment(vcpu, var, seg);
+ kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
+ struct kvm_segment *kvm_desct)
+{
+ kvm_desct->base = seg_desc->base0;
+ kvm_desct->base |= seg_desc->base1 << 16;
+ kvm_desct->base |= seg_desc->base2 << 24;
+ kvm_desct->limit = seg_desc->limit0;
+ kvm_desct->limit |= seg_desc->limit << 16;
+ kvm_desct->selector = selector;
+ kvm_desct->type = seg_desc->type;
+ kvm_desct->present = seg_desc->p;
+ kvm_desct->dpl = seg_desc->dpl;
+ kvm_desct->db = seg_desc->d;
+ kvm_desct->s = seg_desc->s;
+ kvm_desct->l = seg_desc->l;
+ kvm_desct->g = seg_desc->g;
+ kvm_desct->avl = seg_desc->avl;
+ if (!selector)
+ kvm_desct->unusable = 1;
+ else
+ kvm_desct->unusable = 0;
+ kvm_desct->padding = 0;
+}
+
+static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
+ u16 selector,
+ struct descriptor_table *dtable)
+{
+ if (selector & 1 << 2) {
+ struct kvm_segment kvm_seg;
+
+ get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+
+ if (kvm_seg.unusable)
+ dtable->limit = 0;
+ else
+ dtable->limit = kvm_seg.limit;
+ dtable->base = kvm_seg.base;
+ } else
+ kvm_x86_ops->get_gdt(vcpu, dtable);
+}
+
+/* allowed only for 8-byte segment descriptors */
+static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+{
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+ get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+ if (dtable.limit < index * 8 + 7) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+ return 1;
+ }
+ return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
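+
The selector arithmetic above ("selector & 1 << 2", "selector >> 3") follows the x86 segment-selector layout: bits 1:0 are the RPL, bit 2 the table indicator (0 = GDT, 1 = LDT), and bits 15:3 the descriptor index. A standalone sketch:

#include <stdint.h>

struct selector_fields {
	uint8_t  rpl;	/* requested privilege level */
	uint8_t  ldt;	/* 1 if the selector references the LDT */
	uint16_t index;	/* descriptor index; *8 gives the byte offset */
};

static struct selector_fields decode_selector(uint16_t sel)
{
	struct selector_fields f = {
		.rpl   = sel & 3,
		.ldt   = (sel >> 2) & 1,
		.index = sel >> 3,
	};
	return f;
}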
+
+/* allowed only for 8-byte segment descriptors */
+static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+{
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+ get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+ if (dtable.limit < index * 8 + 7)
+ return 1;
+ return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc)
+{
+ u32 base_addr;
+
+ base_addr = seg_desc->base0;
+ base_addr |= (seg_desc->base1 << 16);
+ base_addr |= (seg_desc->base2 << 24);
+
+ return base_addr;
+}
+
+static int load_tss_segment32(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_32 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_read_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_32));
+}
+
+static int save_tss_segment32(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_32 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_write_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_32));
+}
+
+static int load_tss_segment16(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_16 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_read_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_16));
+}
+
+static int save_tss_segment16(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_16 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_write_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_16));
+}
+
+static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment kvm_seg;
+
+ get_segment(vcpu, &kvm_seg, seg);
+ return kvm_seg.selector;
+}
+
+static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+ u16 selector,
+ struct kvm_segment *kvm_seg)
+{
+ struct desc_struct seg_desc;
+
+ if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+ return 1;
+ seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+ return 0;
+}
+
+static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ int type_bits, int seg)
+{
+ struct kvm_segment kvm_seg;
+
+ if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+ return 1;
+ kvm_seg.type |= type_bits;
+
+ if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
+ seg != VCPU_SREG_LDTR)
+ if (!kvm_seg.s)
+ kvm_seg.unusable = 1;
+
+ set_segment(vcpu, &kvm_seg, seg);
+ return 0;
+}
+
+static void save_state_to_tss32(struct kvm_vcpu *vcpu,
+ struct tss_segment_32 *tss)
+{
+ tss->cr3 = vcpu->arch.cr3;
+ tss->eip = vcpu->arch.rip;
+ tss->eflags = kvm_x86_ops->get_rflags(vcpu);
+ tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
+ tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
+ tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
+ tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
+ tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
+ tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
+ tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
+
+ tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+ tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
+ tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
+ tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+ tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss32(struct kvm_vcpu *vcpu,
+ struct tss_segment_32 *tss)
+{
+ kvm_set_cr3(vcpu, tss->cr3);
+
+ vcpu->arch.rip = tss->eip;
+ kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
+
+ vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
+ vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
+ vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
+ vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
+ vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
+ vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
+ vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
+ vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+
+ if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+ return 1;
+ return 0;
+}
+
+static void save_state_to_tss16(struct kvm_vcpu *vcpu,
+ struct tss_segment_16 *tss)
+{
+ tss->ip = vcpu->arch.rip;
+ tss->flag = kvm_x86_ops->get_rflags(vcpu);
+ tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
+ tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
+ tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
+ tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
+ tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
+ tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
+ tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
+ tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+
+ tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+ tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+ tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss16(struct kvm_vcpu *vcpu,
+ struct tss_segment_16 *tss)
+{
+ vcpu->arch.rip = tss->ip;
+ kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
+ vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
+ vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
+ vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
+ vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
+ vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
+ vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
+ vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
+ vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+
+ if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ return 1;
+ return 0;
+}
+
+int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
+ struct desc_struct *cseg_desc,
+ struct desc_struct *nseg_desc)
+{
+ struct tss_segment_16 tss_segment_16;
+ int ret = 0;
+
+ if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+ goto out;
+
+ save_state_to_tss16(vcpu, &tss_segment_16);
+ save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
+
+ if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+ goto out;
+ if (load_state_from_tss16(vcpu, &tss_segment_16))
+ goto out;
+
+ ret = 1;
+out:
+ return ret;
+}
+
+int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
+ struct desc_struct *cseg_desc,
+ struct desc_struct *nseg_desc)
+{
+ struct tss_segment_32 tss_segment_32;
+ int ret = 0;
+
+ if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+ goto out;
+
+ save_state_to_tss32(vcpu, &tss_segment_32);
+ save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
+
+ if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+ goto out;
+ if (load_state_from_tss32(vcpu, &tss_segment_32))
+ goto out;
+
+ ret = 1;
+out:
+ return ret;
}
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+{
+ struct kvm_segment tr_seg;
+ struct desc_struct cseg_desc;
+ struct desc_struct nseg_desc;
+ int ret = 0;
+
+ get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+
+ if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
+ goto out;
+
+ if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+ goto out;
+
+ if (reason != TASK_SWITCH_IRET) {
+ int cpl;
+
+ cpl = kvm_x86_ops->get_cpl(vcpu);
+ if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+ }
+
+ if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
+ kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+ return 1;
+ }
+
+ if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+ cseg_desc.type &= ~(1 << 8); /* clear the busy (B) flag */
+ save_guest_segment_descriptor(vcpu, tr_seg.selector,
+ &cseg_desc);
+ }
+
+ if (reason == TASK_SWITCH_IRET) {
+ u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+ kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
+ }
+
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ kvm_x86_ops->cache_regs(vcpu);
+
+ if (nseg_desc.type & 8)
+ ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+ &nseg_desc);
+ else
+ ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+ &nseg_desc);
+
+ if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
+ u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+ kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
+ }
+
+ if (reason != TASK_SWITCH_IRET) {
+ nseg_desc.type |= (1 << 8);
+ save_guest_segment_descriptor(vcpu, tss_selector,
+ &nseg_desc);
+ }
+
+ kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+ seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
+ tr_seg.type = 11;
+ set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+out:
+ kvm_x86_ops->decache_regs(vcpu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_task_switch);
+
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -2862,12 +3542,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
- set_cr8(vcpu, sregs->cr8);
+ kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
-#ifdef CONFIG_X86_64
kvm_x86_ops->set_efer(vcpu, sregs->efer);
-#endif
kvm_set_apic_base(vcpu, sregs->apic_base);
kvm_x86_ops->decache_cr4_guest_bits(vcpu);
@@ -3141,9 +3819,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
- vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
@@ -3175,7 +3853,9 @@ fail:
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
kvm_free_lapic(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
kvm_mmu_destroy(vcpu);
+ up_read(&vcpu->kvm->slots_lock);
free_page((unsigned long)vcpu->arch.pio_data);
}
@@ -3219,10 +3899,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
+ kvm_free_pit(kvm);
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
+ if (kvm->arch.apic_access_page)
+ put_page(kvm->arch.apic_access_page);
kfree(kvm);
}
@@ -3278,8 +3961,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
- || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+ return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+ || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
}
static void vcpu_kick_intr(void *info)
@@ -3293,11 +3976,17 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
++vcpu->stat.halt_wakeup;
}
- if (vcpu->guest_mode)
+ /*
+ * We may be called synchronously with irqs disabled in guest mode,
+ * so there is no need to call smp_call_function_single() in that case.
+ */
+ if (vcpu->guest_mode && vcpu->cpu != cpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+ put_cpu();
}
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 79586003397a..2ca08386f993 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -65,6 +65,14 @@
#define MemAbs (1<<9) /* Memory operand is absolute displacement */
#define String (1<<10) /* String instruction (rep capable) */
#define Stack (1<<11) /* Stack instruction (push/pop) */
+#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
+#define GroupMask 0xff /* Group number stored in bits 0:7 */
+
+enum {
+ Group1_80, Group1_81, Group1_82, Group1_83,
+ Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
+};
static u16 opcode_table[256] = {
/* 0x00 - 0x07 */
@@ -123,14 +131,14 @@ static u16 opcode_table[256] = {
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x80 - 0x87 */
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+ Group | Group1_80, Group | Group1_81,
+ Group | Group1_82, Group | Group1_83,
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
/* 0x88 - 0x8F */
ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
- 0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov | Stack,
+ 0, ModRM | DstReg, 0, Group | Group1A,
/* 0x90 - 0x9F */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
@@ -164,16 +172,15 @@ static u16 opcode_table[256] = {
0, 0, 0, 0,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- ImplicitOps, ImplicitOps,
- ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+ ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
/* 0xF8 - 0xFF */
ImplicitOps, 0, ImplicitOps, ImplicitOps,
- 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
+ 0, 0, Group | Group4, Group | Group5,
};
static u16 twobyte_table[256] = {
/* 0x00 - 0x0F */
- 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
+ 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0,
ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -229,6 +236,56 @@ static u16 twobyte_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
+static u16 group_table[] = {
+ [Group1_80*8] =
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ [Group1_81*8] =
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ [Group1_82*8] =
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ [Group1_83*8] =
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ [Group1A*8] =
+ DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
+ [Group3_Byte*8] =
+ ByteOp | SrcImm | DstMem | ModRM, 0,
+ ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0,
+ [Group3*8] =
+ DstMem | SrcImm | ModRM, 0,
+ DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0,
+ [Group4*8] =
+ ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0, 0, 0,
+ [Group5*8] =
+ DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0,
+ SrcMem | ModRM, 0, SrcMem | ModRM | Stack, 0,
+ [Group7*8] =
+ 0, 0, ModRM | SrcMem, ModRM | SrcMem,
+ SrcNone | ModRM | DstMem | Mov, 0,
+ SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
+};
+
+static u16 group2_table[] = {
+ [Group7*8] =
+ SrcNone | ModRM, 0, 0, 0,
+ SrcNone | ModRM | DstMem | Mov, 0,
+ SrcMem16 | ModRM | Mov, 0,
+};
+
/* EFLAGS bit definitions. */
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
@@ -317,7 +374,7 @@ static u16 twobyte_table[256] = {
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
do { \
- unsigned long _tmp; \
+ unsigned long __tmp; \
switch ((_dst).bytes) { \
case 1: \
__asm__ __volatile__ ( \
@@ -325,7 +382,7 @@ static u16 twobyte_table[256] = {
_op"b %"_bx"3,%1; " \
_POST_EFLAGS("0", "4", "2") \
: "=m" (_eflags), "=m" ((_dst).val), \
- "=&r" (_tmp) \
+ "=&r" (__tmp) \
: _by ((_src).val), "i" (EFLAGS_MASK)); \
break; \
default: \
@@ -426,29 +483,40 @@ static u16 twobyte_table[256] = {
(_type)_x; \
})
+static inline unsigned long ad_mask(struct decode_cache *c)
+{
+ return (1UL << (c->ad_bytes << 3)) - 1;
+}
+
/* Access/update address held in a register, based on addressing mode. */
-#define address_mask(reg) \
- ((c->ad_bytes == sizeof(unsigned long)) ? \
- (reg) : ((reg) & ((1UL << (c->ad_bytes << 3)) - 1)))
-#define register_address(base, reg) \
- ((base) + address_mask(reg))
-#define register_address_increment(reg, inc) \
- do { \
- /* signed type ensures sign extension to long */ \
- int _inc = (inc); \
- if (c->ad_bytes == sizeof(unsigned long)) \
- (reg) += _inc; \
- else \
- (reg) = ((reg) & \
- ~((1UL << (c->ad_bytes << 3)) - 1)) | \
- (((reg) + _inc) & \
- ((1UL << (c->ad_bytes << 3)) - 1)); \
- } while (0)
+static inline unsigned long
+address_mask(struct decode_cache *c, unsigned long reg)
+{
+ if (c->ad_bytes == sizeof(unsigned long))
+ return reg;
+ else
+ return reg & ad_mask(c);
+}
-#define JMP_REL(rel) \
- do { \
- register_address_increment(c->eip, rel); \
- } while (0)
+static inline unsigned long
+register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+{
+ return base + address_mask(c, reg);
+}
+
+static inline void
+register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
+{
+ if (c->ad_bytes == sizeof(unsigned long))
+ *reg += inc;
+ else
+ *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
+}
+
+static inline void jmp_rel(struct decode_cache *c, int rel)
+{
+ register_address_increment(c, &c->eip, rel);
+}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
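
The old address_mask()/register_address_increment() macros relied on a variable named c being in scope at every call site; the inline replacements above take the decode cache explicitly and are type-checked. A standalone sketch of the masking arithmetic (a hypothetical userspace demo, not kernel code) also shows why the ad_bytes == sizeof(unsigned long) special case matters: shifting 1UL by the full word width would be undefined, so full-width registers bypass the mask entirely.

#include <stdio.h>

struct decode_cache { int ad_bytes; };

static unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;	/* 2 -> 0xffff, 4 -> 0xffffffff */
}

int main(void)
{
	struct decode_cache c = { .ad_bytes = 2 };	/* 16-bit addressing */
	unsigned long reg = 0x1234ffffUL;

	/* the increment wraps inside the 16-bit window; upper bits survive */
	reg = (reg & ~ad_mask(&c)) | ((reg + 1) & ad_mask(&c));
	printf("%#lx\n", reg);	/* prints 0x12340000 */
	return 0;
}
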
@@ -763,7 +831,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
int rc = 0;
int mode = ctxt->mode;
- int def_op_bytes, def_ad_bytes;
+ int def_op_bytes, def_ad_bytes, group;
/* Shadow copy of register state. Committed on successful emulation. */
@@ -864,12 +932,24 @@ done_prefixes:
c->b = insn_fetch(u8, 1, c->eip);
c->d = twobyte_table[c->b];
}
+ }
- /* Unrecognised? */
- if (c->d == 0) {
- DPRINTF("Cannot emulate %02x\n", c->b);
- return -1;
- }
+ if (c->d & Group) {
+ group = c->d & GroupMask;
+ c->modrm = insn_fetch(u8, 1, c->eip);
+ --c->eip;
+
+ group = (group << 3) + ((c->modrm >> 3) & 7);
+ if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
+ c->d = group2_table[group];
+ else
+ c->d = group_table[group];
+ }
+
+ /* Unrecognised? */
+ if (c->d == 0) {
+ DPRINTF("Cannot emulate %02x\n", c->b);
+ return -1;
}
if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
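
The lookup arithmetic above packs eight entries per group: the group number carried in the low bits of the opcode_table entry selects a block of group_table[], and ModRM bits 3..5 select the slot within that block. A minimal sketch, assuming the tables defined earlier in this file:

	u8 modrm = 0xf7;	/* mod = 3, reg = 6, rm = 7 */
	int group = Group5;	/* from the opcode_table entry's low bits */
	int idx = (group << 3) + ((modrm >> 3) & 7);

	/* Group5 slot 6 is push: SrcMem | ModRM | Stack */
	u16 d = group_table[idx];

	/* GroupDual entries (only Group7 here) switch to group2_table
	 * when mod == 3, i.e. the register rather than memory form. */
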
@@ -924,6 +1004,7 @@ done_prefixes:
*/
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->src.type = OP_REG;
+ c->src.val = c->modrm_val;
break;
}
c->src.type = OP_MEM;
@@ -967,6 +1048,7 @@ done_prefixes:
case DstMem:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.type = OP_REG;
+ c->dst.val = c->dst.orig_val = c->modrm_val;
break;
}
c->dst.type = OP_MEM;
@@ -984,8 +1066,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
c->dst.type = OP_MEM;
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
- register_address_increment(c->regs[VCPU_REGS_RSP], -c->op_bytes);
- c->dst.ptr = (void *) register_address(ctxt->ss_base,
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
+ c->dst.ptr = (void *) register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]);
}
@@ -995,13 +1077,13 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
int rc;
- rc = ops->read_std(register_address(ctxt->ss_base,
+ rc = ops->read_std(register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]),
&c->dst.val, c->dst.bytes, ctxt->vcpu);
if (rc != 0)
return rc;
- register_address_increment(c->regs[VCPU_REGS_RSP], c->dst.bytes);
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes);
return 0;
}
@@ -1043,26 +1125,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
switch (c->modrm_reg) {
case 0 ... 1: /* test */
- /*
- * Special case in Grp3: test has an immediate
- * source operand.
- */
- c->src.type = OP_IMM;
- c->src.ptr = (unsigned long *)c->eip;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- if (c->src.bytes == 8)
- c->src.bytes = 4;
- switch (c->src.bytes) {
- case 1:
- c->src.val = insn_fetch(s8, 1, c->eip);
- break;
- case 2:
- c->src.val = insn_fetch(s16, 2, c->eip);
- break;
- case 4:
- c->src.val = insn_fetch(s32, 4, c->eip);
- break;
- }
emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
break;
case 2: /* not */
@@ -1076,7 +1138,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
rc = X86EMUL_UNHANDLEABLE;
break;
}
-done:
return rc;
}
@@ -1084,7 +1145,6 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc;
switch (c->modrm_reg) {
case 0: /* inc */
@@ -1094,36 +1154,11 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
emulate_1op("dec", c->dst, ctxt->eflags);
break;
case 4: /* jmp abs */
- if (c->b == 0xff)
- c->eip = c->dst.val;
- else {
- DPRINTF("Cannot emulate %02x\n", c->b);
- return X86EMUL_UNHANDLEABLE;
- }
+ c->eip = c->src.val;
break;
case 6: /* push */
-
- /* 64-bit mode: PUSH always pushes a 64-bit operand. */
-
- if (ctxt->mode == X86EMUL_MODE_PROT64) {
- c->dst.bytes = 8;
- rc = ops->read_std((unsigned long)c->dst.ptr,
- &c->dst.val, 8, ctxt->vcpu);
- if (rc != 0)
- return rc;
- }
- register_address_increment(c->regs[VCPU_REGS_RSP],
- -c->dst.bytes);
- rc = ops->write_emulated(register_address(ctxt->ss_base,
- c->regs[VCPU_REGS_RSP]), &c->dst.val,
- c->dst.bytes, ctxt->vcpu);
- if (rc != 0)
- return rc;
- c->dst.type = OP_NONE;
+ emulate_push(ctxt);
break;
- default:
- DPRINTF("Cannot emulate %02x\n", c->b);
- return X86EMUL_UNHANDLEABLE;
}
return 0;
}
@@ -1361,19 +1396,19 @@ special_insn:
c->dst.type = OP_MEM;
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
- register_address_increment(c->regs[VCPU_REGS_RSP],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP],
-c->op_bytes);
c->dst.ptr = (void *) register_address(
- ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
+ c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
break;
case 0x58 ... 0x5f: /* pop reg */
pop_instruction:
- if ((rc = ops->read_std(register_address(ctxt->ss_base,
+ if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]), c->dst.ptr,
c->op_bytes, ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSP],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP],
c->op_bytes);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
@@ -1393,9 +1428,9 @@ special_insn:
1,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
- address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
- register_address(ctxt->es_base,
+ register_address(c, ctxt->es_base,
c->regs[VCPU_REGS_RDI]),
c->rep_prefix,
c->regs[VCPU_REGS_RDX]) == 0) {
@@ -1409,9 +1444,9 @@ special_insn:
0,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
- address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
- register_address(c->override_base ?
+ register_address(c, c->override_base ?
*c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
@@ -1425,7 +1460,7 @@ special_insn:
int rel = insn_fetch(s8, 1, c->eip);
if (test_cc(c->b, ctxt->eflags))
- JMP_REL(rel);
+ jmp_rel(c, rel);
break;
}
case 0x80 ... 0x83: /* Grp1 */
@@ -1477,7 +1512,7 @@ special_insn:
case 0x88 ... 0x8b: /* mov */
goto mov;
case 0x8d: /* lea r16/r32, m */
- c->dst.val = c->modrm_val;
+ c->dst.val = c->modrm_ea;
break;
case 0x8f: /* pop (sole member of Grp1a) */
rc = emulate_grp1a(ctxt, ops);
@@ -1501,27 +1536,27 @@ special_insn:
case 0xa4 ... 0xa5: /* movs */
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
- if ((rc = ops->read_emulated(register_address(
+ if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
&c->dst.val,
c->dst.bytes, ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
case 0xa6 ... 0xa7: /* cmps */
c->src.type = OP_NONE; /* Disable writeback. */
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.ptr = (unsigned long *)register_address(
+ c->src.ptr = (unsigned long *)register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]);
@@ -1533,7 +1568,7 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
@@ -1546,10 +1581,10 @@ special_insn:
emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->src.bytes
: c->src.bytes);
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
@@ -1557,11 +1592,11 @@ special_insn:
case 0xaa ... 0xab: /* stos */
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
c->dst.val = c->regs[VCPU_REGS_RAX];
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
@@ -1569,7 +1604,7 @@ special_insn:
c->dst.type = OP_REG;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
- if ((rc = ops->read_emulated(register_address(
+ if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
@@ -1577,7 +1612,7 @@ special_insn:
c->dst.bytes,
ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
@@ -1616,14 +1651,14 @@ special_insn:
goto cannot_emulate;
}
c->src.val = (unsigned long) c->eip;
- JMP_REL(rel);
+ jmp_rel(c, rel);
c->op_bytes = c->ad_bytes;
emulate_push(ctxt);
break;
}
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
- JMP_REL(c->src.val);
+ jmp_rel(c, c->src.val);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
@@ -1690,6 +1725,8 @@ twobyte_insn:
goto done;
kvm_emulate_hypercall(ctxt->vcpu);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 2: /* lgdt */
rc = read_descriptor(ctxt, ops, c->src.ptr,
@@ -1697,6 +1734,8 @@ twobyte_insn:
if (rc)
goto done;
realmode_lgdt(ctxt->vcpu, size, address);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 3: /* lidt/vmmcall */
if (c->modrm_mod == 3 && c->modrm_rm == 1) {
@@ -1712,27 +1751,25 @@ twobyte_insn:
goto done;
realmode_lidt(ctxt->vcpu, size, address);
}
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 4: /* smsw */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- *(u16 *)&c->regs[c->modrm_rm]
- = realmode_get_cr(ctxt->vcpu, 0);
+ c->dst.bytes = 2;
+ c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
break;
case 6: /* lmsw */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- realmode_lmsw(ctxt->vcpu, (u16)c->modrm_val,
- &ctxt->eflags);
+ realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
+ &ctxt->eflags);
break;
case 7: /* invlpg */
emulate_invlpg(ctxt->vcpu, memop);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
default:
goto cannot_emulate;
}
- /* Disable writeback. */
- c->dst.type = OP_NONE;
break;
case 0x06:
emulate_clts(ctxt->vcpu);
@@ -1823,7 +1860,7 @@ twobyte_insn:
goto cannot_emulate;
}
if (test_cc(c->b, ctxt->eflags))
- JMP_REL(rel);
+ jmp_rel(c, rel);
c->dst.type = OP_NONE;
break;
}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4a4761892951..de236e419cb5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -287,47 +287,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
pkmap_page_table = pte;
}
-static void __meminit free_new_highpage(struct page *page)
-{
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
-}
-
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
ClearPageReserved(page);
- free_new_highpage(page);
+ init_page_count(page);
+ __free_page(page);
+ totalhigh_pages++;
} else
SetPageReserved(page);
}
-static int __meminit
-add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-{
- free_new_highpage(page);
- totalram_pages++;
-#ifdef CONFIG_FLATMEM
- max_mapnr = max(pfn, max_mapnr);
-#endif
- num_physpages++;
-
- return 0;
-}
-
-/*
- * Not currently handling the NUMA case.
- * Assuming single node and all memory that
- * has been added dynamically that would be
- * onlined here is in HIGHMEM.
- */
-void __meminit online_page(struct page *page)
-{
- ClearPageReserved(page);
- add_one_highpage_hotplug(page, page_to_pfn(page));
-}
-
#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fbb8652cf59..32ba13b0f818 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -620,15 +620,6 @@ void __init paging_init(void)
/*
* Memory hotplug specific functions
*/
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory is added always to NORMAL zone. This means you will never get
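
Both architectures drop their private online_page() in favor of a common helper. A hedged sketch of that generic version (assumed to land in mm/memory_hotplug.c in the same series) folds together the HIGHMEM and FLATMEM bookkeeping that the two removed copies handled separately:

/* sketch of the assumed generic helper, not part of this diff */
void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	totalram_pages++;
	num_physpages++;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
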
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d176b23110cc..804de18abcc2 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
- unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ unsigned long size, unsigned long prot_val, void *caller)
{
unsigned long pfn, offset, vaddr;
resource_size_t last_addr;
@@ -212,7 +212,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
/*
* Ok, go for it..
*/
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
area->phys_addr = phys_addr;
@@ -255,7 +255,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
*/
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
@@ -272,7 +273,8 @@ EXPORT_SYMBOL(ioremap_nocache);
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
if (pat_wc_enabled)
- return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+ __builtin_return_address(0));
else
return ioremap_nocache(phys_addr, size);
}
@@ -280,7 +282,8 @@ EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
- return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
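
With __ioremap_caller() every public variant records its own call site, so /proc/vmallocinfo can attribute each mapping to the function that created it. Adding another variant only takes a few lines; ioremap_example() below is a hypothetical illustration, not an exported symbol:

void __iomem *ioremap_example(resource_size_t phys_addr, unsigned long size)
{
	/* capture this wrapper's return address, not __ioremap_caller's */
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
				__builtin_return_address(0));
}
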
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e7ca7fc48d12..277446cd30b6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -387,8 +387,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
break;
}
- printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
- saved_ptr->start, saved_ptr->end);
+ pr_debug("Overlap at 0x%Lx-0x%Lx\n",
+ saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
list_add(&new_entry->nd, &saved_ptr->nd);
new_entry = NULL;
@@ -510,7 +510,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
{
u64 offset = ((u64) pfn) << PAGE_SHIFT;
unsigned long flags = _PAGE_CACHE_UC_MINUS;
- unsigned long ret_flags;
int retval;
if (!range_is_allowed(pfn, size))
@@ -549,14 +548,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (flags != _PAGE_CACHE_UC_MINUS) {
retval = reserve_memtype(offset, offset + size, flags, NULL);
} else {
- retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+ retval = reserve_memtype(offset, offset + size, -1, &flags);
}
if (retval < 0)
return 0;
- flags = ret_flags;
-
if (pfn <= max_pfn_mapped &&
ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
free_memtype(offset, offset + size);
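
The cleanup works because reserve_memtype() treats its last argument as both input and output when the requested type is -1. The resulting caller pattern, restated from the hunk above:

	unsigned long flags = _PAGE_CACHE_UC_MINUS;

	/* req_type == -1: write the effective type back through 'flags',
	 * so the ret_flags shadow copy is no longer needed */
	if (reserve_memtype(offset, offset + size, -1, &flags) < 0)
		return 0;
	/* 'flags' now holds whatever memtype PAT actually granted */
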
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6cbcf65609ad..126766d43aea 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -387,7 +387,7 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
static int pin_page(struct page *page, enum pt_level level)
{
- unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
+ unsigned pgfl = TestSetPagePinned(page);
int flush;
if (pgfl)
@@ -468,7 +468,7 @@ void __init xen_mark_init_mm_pinned(void)
static int unpin_page(struct page *page, enum pt_level level)
{
- unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
+ unsigned pgfl = TestClearPagePinned(page);
if (pgfl && !PageHighMem(page)) {
void *pt = lowmem_page_address(page);
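
TestSetPagePinned() and TestClearPagePinned() come from the page-flags accessor macros; as a sketch (an assumption, since the macro definitions are outside this diff), they expand to roughly the open-coded bit operations being removed:

static inline int TestSetPagePinned(struct page *page)
{
	return test_and_set_bit(PG_pinned, &page->flags);
}

static inline int TestClearPagePinned(struct page *page)
{
	return test_and_clear_bit(PG_pinned, &page->flags);
}
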
diff --git a/block/bsg.c b/block/bsg.c
index f51172ed27c2..23ea4fd1a66d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -699,14 +699,26 @@ static struct bsg_device *bsg_alloc_device(void)
return bd;
}
+static void bsg_kref_release_function(struct kref *kref)
+{
+ struct bsg_class_device *bcd =
+ container_of(kref, struct bsg_class_device, ref);
+
+ if (bcd->release)
+ bcd->release(bcd->parent);
+
+ put_device(bcd->parent);
+}
+
static int bsg_put_device(struct bsg_device *bd)
{
- int ret = 0;
- struct device *dev = bd->queue->bsg_dev.dev;
+ int ret = 0, do_free;
+ struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
- if (!atomic_dec_and_test(&bd->ref_count))
+ do_free = atomic_dec_and_test(&bd->ref_count);
+ if (!do_free)
goto out;
dprintk("%s: tearing down\n", bd->name);
@@ -723,12 +735,13 @@ static int bsg_put_device(struct bsg_device *bd)
*/
ret = bsg_complete_all_commands(bd);
- blk_put_queue(bd->queue);
hlist_del(&bd->dev_list);
kfree(bd);
out:
mutex_unlock(&bsg_mutex);
- put_device(dev);
+ kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+ if (do_free)
+ blk_put_queue(q);
return ret;
}
@@ -796,7 +809,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
if (bcd)
- get_device(bcd->dev);
+ kref_get(&bcd->ref);
mutex_unlock(&bsg_mutex);
if (!bcd)
@@ -808,7 +821,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
bd = bsg_add_device(inode, bcd->queue, file);
if (IS_ERR(bd))
- put_device(bcd->dev);
+ kref_put(&bcd->ref, bsg_kref_release_function);
return bd;
}
@@ -947,14 +960,14 @@ void bsg_unregister_queue(struct request_queue *q)
idr_remove(&bsg_minor_idr, bcd->minor);
sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
- put_device(bcd->dev);
bcd->class_dev = NULL;
+ kref_put(&bcd->ref, bsg_kref_release_function);
mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
-int bsg_register_queue(struct request_queue *q, struct device *gdev,
- const char *name)
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+ const char *name, void (*release)(struct device *))
{
struct bsg_class_device *bcd;
dev_t dev;
@@ -965,7 +978,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
if (name)
devname = name;
else
- devname = gdev->bus_id;
+ devname = parent->bus_id;
/*
* we need a proper transport to send commands, not a stacked device
@@ -996,9 +1009,11 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
bcd->minor = minor;
bcd->queue = q;
- bcd->dev = get_device(gdev);
+ bcd->parent = get_device(parent);
+ bcd->release = release;
+ kref_init(&bcd->ref);
dev = MKDEV(bsg_major, bcd->minor);
- class_dev = device_create(bsg_class, gdev, dev, "%s", devname);
+ class_dev = device_create(bsg_class, parent, dev, "%s", devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto put_dev;
@@ -1017,7 +1032,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
unregister_class_dev:
device_unregister(class_dev);
put_dev:
- put_device(gdev);
+ put_device(parent);
remove_idr:
idr_remove(&bsg_minor_idr, minor);
unlock:
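
The bsg lifetime now hangs off a kref in the class device: bsg_get_device() and bsg_register_queue() take references, and the final kref_put() runs bsg_kref_release_function(), which invokes the transport's release callback before dropping the parent device. A sketch of the struct this relies on after the change (see include/linux/bsg.h for the authoritative layout):

struct bsg_class_device {
	struct device *class_dev;
	struct device *parent;		/* was 'dev' */
	int minor;
	struct request_queue *queue;
	struct kref ref;		/* final put -> bsg_kref_release_function() */
	void (*release)(struct device *);
};
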
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 788da9781f80..0d90ff5fd117 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void)
cx = pr->power.state;
if (!cx || acpi_idle_suspend) {
- if (pm_idle_save)
- pm_idle_save();
- else
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
acpi_safe_halt();
-
- if (irqs_disabled())
local_irq_enable();
+ }
return;
}
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void)
* Use the appropriate idle routine, the one that would
* be used without acpi C-states.
*/
- if (pm_idle_save)
- pm_idle_save();
- else
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
acpi_safe_halt();
+ local_irq_enable();
+ }
/*
* TBD: Can't get time duration while in C1, as resumes
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void)
* skew otherwise.
*/
sleep_ticks = 0xFFFFFFFF;
- if (irqs_disabled())
- local_irq_enable();
break;
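
The invariant being restored: acpi_processor_idle() must return with interrupts enabled on every path. For reference, a reconstructed sketch of the era's acpi_safe_halt() (treat the details as an assumption): safe_halt() does sti; hlt, but is skipped when a reschedule is already pending, which is exactly the path the explicit local_irq_enable() now covers.

static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/* clearing TS_POLLING must be visible before testing need_resched */
	smp_mb();
	if (!need_resched())
		safe_halt();	/* enables IRQs, halts, returns on wakeup */
	current_thread_info()->status |= TS_POLLING;
	/* IRQs are enabled here only if safe_halt() actually ran */
}
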
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 1bcecc7dd2ca..766bd25d3376 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1710,7 +1710,6 @@ static int acpi_thermal_resume(struct acpi_device *device)
return AE_OK;
}
-#ifdef CONFIG_DMI
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
@@ -1785,7 +1784,6 @@ static struct dmi_system_id thermal_dmi_table[] __initdata = {
},
{}
};
-#endif /* CONFIG_DMI */
static int __init acpi_thermal_init(void)
{
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 7bd76639544c..e8e38faeafd8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -319,7 +319,7 @@ out:
#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access (struct block_device *bdev, sector_t sector,
- unsigned long *data)
+ void **kaddr, unsigned long *pfn)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
@@ -333,7 +333,8 @@ static int brd_direct_access (struct block_device *bdev, sector_t sector,
page = brd_insert_page(brd, sector);
if (!page)
return -ENOMEM;
- *data = (unsigned long)page_address(page);
+ *kaddr = page_address(page);
+ *pfn = page_to_pfn(page);
return 0;
}
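
The widened direct_access() prototype returns the pfn alongside the kernel virtual address, which XIP callers need in order to set up user mappings without a struct page. A hypothetical caller sketch (buf, offset and len are illustrative names, not from this diff):

	void *kaddr;
	unsigned long pfn;
	int err;

	err = bdev->bd_disk->fops->direct_access(bdev, sector, &kaddr, &pfn);
	if (!err)
		memcpy(buf, kaddr + offset, len);	/* no page cache involved */
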
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2906ee7bd298..929d4fa73fd9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -732,9 +732,16 @@ config NVRAM
To compile this driver as a module, choose M here: the
module will be called nvram.
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV && !ARM && !SUPERH && !S390 && !AVR32
+ depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
+ && !ARM && !SUPERH && !S390 && !AVR32
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -840,6 +847,8 @@ config DS1302
will get access to the real time clock (or hardware clock) built
into your computer.
+endif # RTC_LIB
+
config COBALT_LCD
bool "Support for Cobalt LCD"
depends on MIPS_COBALT
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 4e84d233e5a2..583356426dfb 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -189,20 +189,20 @@ typedef struct _mgslpc_info {
u32 pending_bh;
- int bh_running;
- int bh_requested;
+ bool bh_running;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
int dsr_chkcount; /* is floating */
int ri_chkcount;
- int rx_enabled;
- int rx_overflow;
+ bool rx_enabled;
+ bool rx_overflow;
- int tx_enabled;
- int tx_active;
- int tx_aborting;
+ bool tx_enabled;
+ bool tx_active;
+ bool tx_aborting;
u32 idle_mode;
int if_mode; /* serial interface selection (RS-232, v.35 etc) */
@@ -216,12 +216,12 @@ typedef struct _mgslpc_info {
unsigned char serial_signals; /* current serial signal states */
- char irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
char testing_irq;
unsigned int init_error; /* startup error (DIAGS) */
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -402,8 +402,8 @@ static void hdlcdev_exit(MGSLPC_INFO *info);
static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
-static BOOLEAN register_test(MGSLPC_INFO *info);
-static BOOLEAN irq_test(MGSLPC_INFO *info);
+static bool register_test(MGSLPC_INFO *info);
+static bool irq_test(MGSLPC_INFO *info);
static int adapter_test(MGSLPC_INFO *info);
static int claim_resources(MGSLPC_INFO *info);
@@ -411,7 +411,7 @@ static void release_resources(MGSLPC_INFO *info);
static void mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);
-static int rx_get_frame(MGSLPC_INFO *info);
+static bool rx_get_frame(MGSLPC_INFO *info);
static void rx_reset_buffers(MGSLPC_INFO *info);
static int rx_alloc_buffers(MGSLPC_INFO *info);
static void rx_free_buffers(MGSLPC_INFO *info);
@@ -719,7 +719,7 @@ static int mgslpc_resume(struct pcmcia_device *link)
}
-static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
+static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
char *name, const char *routine)
{
#ifdef MGSLPC_PARANOIA_CHECK
@@ -730,17 +730,17 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
if (!info) {
printk(badinfo, name, routine);
- return 1;
+ return true;
}
if (info->magic != MGSLPC_MAGIC) {
printk(badmagic, name, routine);
- return 1;
+ return true;
}
#else
if (!info)
- return 1;
+ return true;
#endif
- return 0;
+ return false;
}
@@ -752,16 +752,16 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
#define CMD_TXEOM BIT1 // transmit end message
#define CMD_TXRESET BIT0 // transmit reset
-static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
+static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
int i = 0;
/* wait for command completion */
while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
udelay(1);
if (i++ == 1000)
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
@@ -825,8 +825,8 @@ static int bh_action(MGSLPC_INFO *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->lock,flags);
@@ -846,7 +846,7 @@ static void bh_handler(struct work_struct *work)
printk( "%s(%d):bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info)) != 0) {
@@ -913,7 +913,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
/* no more free buffers */
issue_command(info, CHA, CMD_RXRESET);
info->pending_bh |= BH_RECEIVE;
- info->rx_overflow = 1;
+ info->rx_overflow = true;
info->icount.buf_overrun++;
return;
}
@@ -1032,8 +1032,8 @@ static void tx_done(MGSLPC_INFO *info)
if (!info->tx_active)
return;
- info->tx_active = 0;
- info->tx_aborting = 0;
+ info->tx_active = false;
+ info->tx_aborting = false;
if (info->params.mode == MGSL_MODE_ASYNC)
return;
@@ -1047,7 +1047,7 @@ static void tx_done(MGSLPC_INFO *info)
info->serial_signals &= ~SerialSignal_RTS;
set_signals(info);
}
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
}
#if SYNCLINK_GENERIC_HDLC
@@ -1081,7 +1081,7 @@ static void tx_ready(MGSLPC_INFO *info)
return;
}
if (!info->tx_count)
- info->tx_active = 0;
+ info->tx_active = false;
}
if (!info->tx_count)
@@ -1261,7 +1261,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
{
isr = read_reg16(info, CHA + ISR);
if (isr & IRQ_TIMER) {
- info->irq_occurred = 1;
+ info->irq_occurred = true;
irq_disable(info, CHA, IRQ_TIMER);
}
@@ -1318,7 +1318,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
- info->bh_requested = 1;
+ info->bh_requested = true;
}
spin_unlock(&info->lock);
@@ -1990,7 +1990,7 @@ static int tx_abort(MGSLPC_INFO * info)
* This results in underrun and abort transmission.
*/
info->tx_count = info->tx_put = info->tx_get = 0;
- info->tx_aborting = TRUE;
+ info->tx_aborting = true;
}
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -2589,7 +2589,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -2604,7 +2605,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -2622,7 +2623,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -3493,8 +3494,8 @@ static void rx_stop(MGSLPC_INFO *info)
/* MODE:03 RAC Receiver Active, 0=inactive */
clear_reg_bits(info, CHA + MODE, BIT3);
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
}
static void rx_start(MGSLPC_INFO *info)
@@ -3504,13 +3505,13 @@ static void rx_start(MGSLPC_INFO *info)
__FILE__,__LINE__, info->device_name );
rx_reset_buffers(info);
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
/* MODE:03 RAC Receiver Active, 1=active */
set_reg_bits(info, CHA + MODE, BIT3);
- info->rx_enabled = 1;
+ info->rx_enabled = true;
}
static void tx_start(MGSLPC_INFO *info)
@@ -3523,24 +3524,24 @@ static void tx_start(MGSLPC_INFO *info)
/* If auto RTS enabled and RTS is inactive, then assert */
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
get_signals(info);
if (!(info->serial_signals & SerialSignal_RTS)) {
info->serial_signals |= SerialSignal_RTS;
set_signals(info);
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
if (info->params.mode == MGSL_MODE_ASYNC) {
if (!info->tx_active) {
- info->tx_active = 1;
+ info->tx_active = true;
tx_ready(info);
}
} else {
- info->tx_active = 1;
+ info->tx_active = true;
tx_ready(info);
mod_timer(&info->tx_timer, jiffies +
msecs_to_jiffies(5000));
@@ -3548,7 +3549,7 @@ static void tx_start(MGSLPC_INFO *info)
}
if (!info->tx_enabled)
- info->tx_enabled = 1;
+ info->tx_enabled = true;
}
static void tx_stop(MGSLPC_INFO *info)
@@ -3559,8 +3560,8 @@ static void tx_stop(MGSLPC_INFO *info)
del_timer(&info->tx_timer);
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
/* Reset the adapter to a known state and prepare it for further use.
@@ -3860,19 +3861,19 @@ static void rx_reset_buffers(MGSLPC_INFO *info)
/* Attempt to return a received HDLC frame
* Only frames received without errors are returned.
*
- * Returns 1 if frame returned, otherwise 0
+ * Returns true if frame returned, otherwise false
*/
-static int rx_get_frame(MGSLPC_INFO *info)
+static bool rx_get_frame(MGSLPC_INFO *info)
{
unsigned short status;
RXBUF *buf;
unsigned int framesize = 0;
unsigned long flags;
struct tty_struct *tty = info->tty;
- int return_frame = 0;
+ bool return_frame = false;
if (info->rx_frame_count == 0)
- return 0;
+ return false;
buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));
@@ -3891,7 +3892,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
else if (!(status & BIT5)) {
info->icount.rxcrc++;
if (info->params.crc_type & HDLC_CRC_RETURN_EX)
- return_frame = 1;
+ return_frame = true;
}
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
@@ -3902,7 +3903,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
}
#endif
} else
- return_frame = 1;
+ return_frame = true;
if (return_frame)
framesize = buf->count;
@@ -3945,16 +3946,16 @@ static int rx_get_frame(MGSLPC_INFO *info)
info->rx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
- return 1;
+ return true;
}
-static BOOLEAN register_test(MGSLPC_INFO *info)
+static bool register_test(MGSLPC_INFO *info)
{
static unsigned char patterns[] =
{ 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
static unsigned int count = ARRAY_SIZE(patterns);
unsigned int i;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
@@ -3965,7 +3966,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
write_reg(info, XAD2, patterns[(i + 1) % count]);
if ((read_reg(info, XAD1) != patterns[i]) ||
(read_reg(info, XAD2) != patterns[(i + 1) % count])) {
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -3974,7 +3975,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
return rc;
}
-static BOOLEAN irq_test(MGSLPC_INFO *info)
+static bool irq_test(MGSLPC_INFO *info)
{
unsigned long end_time;
unsigned long flags;
@@ -3982,10 +3983,10 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
spin_lock_irqsave(&info->lock,flags);
reset_device(info);
- info->testing_irq = TRUE;
+ info->testing_irq = true;
hdlc_mode(info);
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* init hdlc mode */
@@ -4000,13 +4001,13 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
msleep_interruptible(10);
}
- info->testing_irq = FALSE;
+ info->testing_irq = false;
spin_lock_irqsave(&info->lock,flags);
reset_device(info);
spin_unlock_irqrestore(&info->lock,flags);
- return info->irq_occurred ? TRUE : FALSE;
+ return info->irq_occurred;
}
static int adapter_test(MGSLPC_INFO *info)
@@ -4079,7 +4080,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
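
This whole file is the int/BOOLEAN-to-bool conversion, and the same treatment continues in drivers/char/synclink.c below. The pattern in minimal, hypothetical form:

#include <linux/types.h>	/* bool, true, false */

struct flags_example {
	bool tx_active;		/* was: int tx_active; */
	bool irq_occurred;	/* was: BOOLEAN irq_occurred; (driver-local typedef) */
};

static bool irq_seen(struct flags_example *f)
{
	return f->irq_occurred;	/* was: f->irq_occurred ? TRUE : FALSE */
}
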
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5c3142b6f1fc..e2ec2ee4cf79 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -88,6 +88,7 @@
#ifdef CONFIG_SPARC32
#include <linux/pci.h>
+#include <linux/jiffies.h>
#include <asm/ebus.h>
static unsigned long rtc_port;
@@ -1316,7 +1317,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
- while (rtc_is_updating() != 0 && jiffies - uip_watchdog < 2*HZ/100)
+ while (rtc_is_updating() != 0 &&
+ time_before(jiffies, uip_watchdog + 2*HZ/100))
cpu_relax();
/*
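
time_before() exists because open-coded jiffies comparisons misbehave around the counter wrap; the macro compares via signed subtraction, so a deadline that lands shortly after a wrap still reads as in the future. Restated as a usage sketch of the hunk above:

#include <linux/jiffies.h>

	unsigned long deadline = uip_watchdog + 2*HZ/100;	/* 20ms window */

	while (rtc_is_updating() != 0 && time_before(jiffies, deadline))
		cpu_relax();	/* spin until the update-in-progress bit clears */
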
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index a3237d48a584..fadab1d9510f 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -218,9 +218,9 @@ struct mgsl_struct {
u32 pending_bh;
- int bh_running; /* Protection from multiple */
+ bool bh_running; /* Protection from multiple */
int isr_overflow;
- int bh_requested;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
@@ -250,12 +250,12 @@ struct mgsl_struct {
int tx_holding_count; /* number of tx holding buffers waiting */
struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
- int rx_enabled;
- int rx_overflow;
- int rx_rcc_underrun;
+ bool rx_enabled;
+ bool rx_overflow;
+ bool rx_rcc_underrun;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
u32 idle_mode;
u16 cmr_value;
@@ -269,14 +269,14 @@ struct mgsl_struct {
unsigned int io_base; /* base I/O address of adapter */
unsigned int io_addr_size; /* size of the I/O address range */
- int io_addr_requested; /* nonzero if I/O address requested */
+ bool io_addr_requested; /* true if I/O address requested */
unsigned int irq_level; /* interrupt level */
unsigned long irq_flags;
- int irq_requested; /* nonzero if IRQ requested */
+ bool irq_requested; /* true if IRQ requested */
unsigned int dma_level; /* DMA channel */
- int dma_requested; /* nonzero if dma channel requested */
+ bool dma_requested; /* true if dma channel requested */
u16 mbre_bit;
u16 loopback_bits;
@@ -286,27 +286,27 @@ struct mgsl_struct {
unsigned char serial_signals; /* current serial signal states */
- int irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
unsigned int init_error; /* Initialization startup error (DIAGS) */
int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
u32 last_mem_alloc;
unsigned char* memory_base; /* shared memory address (PCI only) */
u32 phys_memory_base;
- int shared_mem_requested;
+ bool shared_mem_requested;
unsigned char* lcr_base; /* local config registers (PCI only) */
u32 phys_lcr_base;
u32 lcr_offset;
- int lcr_mem_requested;
+ bool lcr_mem_requested;
u32 misc_ctrl_value;
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
- BOOLEAN loopmode_insert_requested;
- BOOLEAN loopmode_send_done_requested;
+ bool loopmode_insert_requested;
+ bool loopmode_send_done_requested;
struct _input_signal_events input_signal_events;
@@ -752,10 +752,10 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
/*
* Adapter diagnostic routines
*/
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
+static bool mgsl_register_test( struct mgsl_struct *info );
+static bool mgsl_irq_test( struct mgsl_struct *info );
+static bool mgsl_dma_test( struct mgsl_struct *info );
+static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );
/*
@@ -770,8 +770,8 @@ static struct mgsl_struct* mgsl_allocate_device(void);
* DMA buffer manipulation functions.
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
-static int mgsl_get_rx_frame( struct mgsl_struct *info );
-static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
@@ -791,7 +791,7 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
-static int load_next_tx_holding_buffer(struct mgsl_struct *info);
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
/*
@@ -847,7 +847,7 @@ static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );
/* set non-zero on successful registration with PCI subsystem */
-static int pci_registered;
+static bool pci_registered;
/*
* Global linked list of SyncLink devices
@@ -1054,8 +1054,8 @@ static int mgsl_bh_action(struct mgsl_struct *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
@@ -1079,7 +1079,7 @@ static void mgsl_bh_handler(struct work_struct *work)
printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = mgsl_bh_action(info)) != 0) {
@@ -1113,7 +1113,7 @@ static void mgsl_bh_handler(struct work_struct *work)
static void mgsl_bh_receive(struct mgsl_struct *info)
{
- int (*get_rx_frame)(struct mgsl_struct *info) =
+ bool (*get_rx_frame)(struct mgsl_struct *info) =
(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
if ( debug_level >= DEBUG_LEVEL_BH )
@@ -1187,7 +1187,7 @@ static void mgsl_isr_receive_status( struct mgsl_struct *info )
usc_loopmode_active(info) )
{
++info->icount.rxabort;
- info->loopmode_insert_requested = FALSE;
+ info->loopmode_insert_requested = false;
/* clear CMR:13 to start echoing RxD to TxD */
info->cmr_value &= ~BIT13;
@@ -1257,7 +1257,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
else
info->icount.txunder++;
- info->tx_active = 0;
+ info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
del_timer(&info->tx_timer);
@@ -1267,7 +1267,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
info->serial_signals &= ~SerialSignal_RTS;
usc_set_serial_signals( info );
}
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
}
#if SYNCLINK_GENERIC_HDLC
@@ -1403,7 +1403,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
usc_OutReg( info, SICR,
(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
- info->irq_occurred = 1;
+ info->irq_occurred = true;
}
} /* end of mgsl_isr_io_pin() */
@@ -1431,7 +1431,7 @@ static void mgsl_isr_transmit_data( struct mgsl_struct *info )
if ( info->xmit_cnt )
usc_load_txfifo( info );
else
- info->tx_active = 0;
+ info->tx_active = false;
if (info->xmit_cnt < WAKEUP_CHARS)
info->pending_bh |= BH_TRANSMIT;
@@ -1568,7 +1568,7 @@ static void mgsl_isr_misc( struct mgsl_struct *info )
/* schedule BH handler to restart receiver */
info->pending_bh |= BH_RECEIVE;
- info->rx_rcc_underrun = 1;
+ info->rx_rcc_underrun = true;
}
usc_ClearIrqPendingBits( info, MISC );
@@ -1626,7 +1626,7 @@ static void mgsl_isr_receive_dma( struct mgsl_struct *info )
info->pending_bh |= BH_RECEIVE;
if ( status & BIT3 ) {
- info->rx_overflow = 1;
+ info->rx_overflow = true;
info->icount.buf_overrun++;
}
@@ -1745,7 +1745,7 @@ static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
- info->bh_requested = 1;
+ info->bh_requested = true;
}
spin_unlock(&info->irq_spinlock);
@@ -3303,7 +3303,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3317,7 +3318,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3335,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
spin_lock_irqsave(&info->irq_spinlock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->irq_spinlock, flags);
@@ -4043,13 +4044,13 @@ static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
*
* info pointer to device instance data
*
- * Return Value: 1 if next buffered tx request loaded
+ * Return Value: true if next buffered tx request loaded
* into adapter's tx dma buffer,
- * 0 otherwise
+ * false otherwise
*/
-static int load_next_tx_holding_buffer(struct mgsl_struct *info)
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
- int ret = 0;
+ bool ret = false;
if ( info->tx_holding_count ) {
/* determine if we have enough tx dma buffers
@@ -4073,7 +4074,7 @@ static int load_next_tx_holding_buffer(struct mgsl_struct *info)
/* restart transmit timer */
mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
- ret = 1;
+ ret = true;
}
}
@@ -4119,7 +4120,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->io_base);
return -ENODEV;
}
- info->io_addr_requested = 1;
+ info->io_addr_requested = true;
if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
info->device_name, info ) < 0 ) {
@@ -4127,7 +4128,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->irq_level );
goto errout;
}
- info->irq_requested = 1;
+ info->irq_requested = true;
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
@@ -4135,13 +4136,13 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
__FILE__,__LINE__,info->device_name, info->phys_memory_base);
goto errout;
}
- info->shared_mem_requested = 1;
+ info->shared_mem_requested = true;
if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
goto errout;
}
- info->lcr_mem_requested = 1;
+ info->lcr_mem_requested = true;
info->memory_base = ioremap(info->phys_memory_base,0x40000);
if (!info->memory_base) {
@@ -4172,7 +4173,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
mgsl_release_resources( info );
return -ENODEV;
}
- info->dma_requested = 1;
+ info->dma_requested = true;
/* ISA adapter uses bus master DMA */
set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
@@ -4200,12 +4201,12 @@ static void mgsl_release_resources(struct mgsl_struct *info)
if ( info->irq_requested ) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if ( info->dma_requested ) {
disable_dma(info->dma_level);
free_dma(info->dma_level);
- info->dma_requested = 0;
+ info->dma_requested = false;
}
mgsl_free_dma_buffers(info);
mgsl_free_intermediate_rxbuffer_memory(info);
@@ -4213,15 +4214,15 @@ static void mgsl_release_resources(struct mgsl_struct *info)
if ( info->io_addr_requested ) {
release_region(info->io_base,info->io_addr_size);
- info->io_addr_requested = 0;
+ info->io_addr_requested = false;
}
if ( info->shared_mem_requested ) {
release_mem_region(info->phys_memory_base,0x40000);
- info->shared_mem_requested = 0;
+ info->shared_mem_requested = false;
}
if ( info->lcr_mem_requested ) {
release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
- info->lcr_mem_requested = 0;
+ info->lcr_mem_requested = false;
}
if (info->memory_base){
iounmap(info->memory_base);
@@ -4486,7 +4487,7 @@ static int __init synclink_init(void)
if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
else
- pci_registered = 1;
+ pci_registered = true;
if ((rc = mgsl_init_tty()) < 0)
goto error;
@@ -4679,7 +4680,7 @@ static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
static void usc_set_sdlc_mode( struct mgsl_struct *info )
{
u16 RegValue;
- int PreSL1660;
+ bool PreSL1660;
/*
* determine if the IUSC on the adapter is pre-SL1660. If
@@ -4692,11 +4693,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
*/
usc_OutReg(info,TMCR,0x1f);
RegValue=usc_InReg(info,TMDR);
- if ( RegValue == IUSC_PRE_SL1660 )
- PreSL1660 = 1;
- else
- PreSL1660 = 0;
-
+ PreSL1660 = (RegValue == IUSC_PRE_SL1660);
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
{
@@ -5382,9 +5379,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
int start_index;
int end_index;
int frame_start_index;
- int start_of_frame_found = FALSE;
- int end_of_frame_found = FALSE;
- int reprogram_dma = FALSE;
+ bool start_of_frame_found = false;
+ bool end_of_frame_found = false;
+ bool reprogram_dma = false;
DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
u32 phys_addr;
@@ -5410,9 +5407,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
if ( !start_of_frame_found )
{
- start_of_frame_found = TRUE;
+ start_of_frame_found = true;
frame_start_index = end_index;
- end_of_frame_found = FALSE;
+ end_of_frame_found = false;
}
if ( buffer_list[end_index].status )
@@ -5423,8 +5420,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
/* We want to leave the buffers for this frame intact. */
/* Move on to next possible frame. */
- start_of_frame_found = FALSE;
- end_of_frame_found = TRUE;
+ start_of_frame_found = false;
+ end_of_frame_found = true;
}
/* advance to next buffer entry in linked list */
@@ -5439,8 +5436,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
/* completely screwed, reset all receive buffers! */
mgsl_reset_rx_dma_buffers( info );
frame_start_index = 0;
- start_of_frame_found = FALSE;
- reprogram_dma = TRUE;
+ start_of_frame_found = false;
+ reprogram_dma = true;
break;
}
}
@@ -5466,7 +5463,7 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
} while( start_index != end_index );
- reprogram_dma = TRUE;
+ reprogram_dma = true;
}
if ( reprogram_dma )
@@ -5536,9 +5533,9 @@ static void usc_stop_receiver( struct mgsl_struct *info )
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
- info->rx_enabled = 0;
- info->rx_overflow = 0;
- info->rx_rcc_underrun = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
+ info->rx_rcc_underrun = false;
} /* end of stop_receiver() */
@@ -5601,7 +5598,7 @@ static void usc_start_receiver( struct mgsl_struct *info )
usc_OutReg( info, CCSR, 0x1020 );
- info->rx_enabled = 1;
+ info->rx_enabled = true;
} /* end of usc_start_receiver() */
@@ -5628,14 +5625,14 @@ static void usc_start_transmitter( struct mgsl_struct *info )
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
usc_get_serial_signals( info );
if ( !(info->serial_signals & SerialSignal_RTS) ) {
info->serial_signals |= SerialSignal_RTS;
usc_set_serial_signals( info );
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -5699,11 +5696,11 @@ static void usc_start_transmitter( struct mgsl_struct *info )
mod_timer(&info->tx_timer, jiffies +
msecs_to_jiffies(5000));
}
- info->tx_active = 1;
+ info->tx_active = true;
}
if ( !info->tx_enabled ) {
- info->tx_enabled = 1;
+ info->tx_enabled = true;
if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
else
@@ -5735,8 +5732,8 @@ static void usc_stop_transmitter( struct mgsl_struct *info )
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
} /* end of usc_stop_transmitter() */
@@ -6520,7 +6517,7 @@ static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
- int Done = 0;
+ bool Done = false;
DMABUFFERENTRY *pBufEntry;
unsigned int Index;
@@ -6534,7 +6531,7 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
if ( Index == EndIndex ) {
/* This is the last buffer of the frame! */
- Done = 1;
+ Done = true;
}
/* reset current buffer for reuse */
@@ -6559,18 +6556,18 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
* receive DMA buffers. Only frames received without errors are returned.
*
* Arguments: info pointer to device extension
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-static int mgsl_get_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
- int return_frame = 0;
+ bool return_frame = false;
/*
* current_rx_buffer points to the 1st buffer of the next available
@@ -6629,7 +6626,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
else {
info->icount.rxcrc++;
if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
- return_frame = 1;
+ return_frame = true;
}
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
@@ -6640,7 +6637,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
}
#endif
} else
- return_frame = 1;
+ return_frame = true;
if ( return_frame ) {
/* receive frame has no errors, get frame size.
@@ -6719,7 +6716,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
- ReturnCode = 1;
+ ReturnCode = true;
Cleanup:
@@ -6758,15 +6755,15 @@ Cleanup:
* last Rx DMA buffer and return that last portion of the frame.
*
* Arguments: info pointer to device extension
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
unsigned int CurrentIndex, NextIndex;
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
@@ -6891,7 +6888,7 @@ static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
- ReturnCode = 1;
+ ReturnCode = true;
}
@@ -7000,15 +6997,15 @@ static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
* Performs a register test of the 16C32.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
+static bool mgsl_register_test( struct mgsl_struct *info )
{
static unsigned short BitPatterns[] =
{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
unsigned int i;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7019,10 +7016,10 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
if ( (usc_InReg( info, SICR ) != 0) ||
(usc_InReg( info, IVR ) != 0) ||
(usc_InDmaReg( info, DIVR ) != 0) ){
- rc = FALSE;
+ rc = false;
}
- if ( rc == TRUE ){
+ if ( rc ){
/* Write bit patterns to various registers but do it out of */
/* sync, then read back and verify values. */
@@ -7040,7 +7037,7 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
(usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
(usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
(usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -7056,9 +7053,9 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
/* mgsl_irq_test() Perform interrupt test of the 16C32.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
+static bool mgsl_irq_test( struct mgsl_struct *info )
{
unsigned long EndTime;
unsigned long flags;
@@ -7068,10 +7065,10 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
/*
* Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
- * The ISR sets irq_occurred to 1.
+ * The ISR sets irq_occurred to true.
*/
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
/* Enable INTEN (Port 6, Bit12) */
@@ -7097,10 +7094,7 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
usc_reset(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
- if ( !info->irq_occurred )
- return FALSE;
- else
- return TRUE;
+ return info->irq_occurred;
} /* end of mgsl_irq_test() */
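
[Note: with irq_occurred typed bool, the old four-line "if (!...) return FALSE; else return TRUE;" tail collapses to returning the flag itself. The same transformation on a hypothetical helper:

    struct demo_irq_state {
        bool irq_occurred;          /* set by the ISR under test */
    };

    static bool demo_irq_test(struct demo_irq_state *s)
    {
        /* the flag already carries the verdict; no if/else needed */
        return s->irq_occurred;
    }
]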
@@ -7111,16 +7105,16 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
* using single buffer DMA mode.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
+static bool mgsl_dma_test( struct mgsl_struct *info )
{
unsigned short FifoLevel;
unsigned long phys_addr;
unsigned int FrameSize;
unsigned int i;
char *TmpPtr;
- BOOLEAN rc = TRUE;
+ bool rc = true;
unsigned short status=0;
unsigned long EndTime;
unsigned long flags;
@@ -7233,7 +7227,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
for(;;) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7289,7 +7283,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
for(;;) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7309,7 +7303,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE )
+ if ( rc )
{
/* Enable 16C32 transmitter. */
@@ -7337,7 +7331,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
@@ -7348,13 +7342,13 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE ){
+ if ( rc ){
/* CHECK FOR TRANSMIT ERRORS */
if ( status & (BIT5 + BIT1) )
- rc = FALSE;
+ rc = false;
}
- if ( rc == TRUE ) {
+ if ( rc ) {
/* WAIT FOR RECEIVE COMPLETE */
/* Wait 100ms */
@@ -7364,7 +7358,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
status=info->rx_buffer_list[0].status;
while ( status == 0 ) {
if (time_after(jiffies, EndTime)) {
- rc = FALSE;
+ rc = false;
break;
}
status=info->rx_buffer_list[0].status;
@@ -7372,17 +7366,17 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
}
- if ( rc == TRUE ) {
+ if ( rc ) {
/* CHECK FOR RECEIVE ERRORS */
status = info->rx_buffer_list[0].status;
if ( status & (BIT8 + BIT3 + BIT1) ) {
/* receive error has occurred */
- rc = FALSE;
+ rc = false;
} else {
if ( memcmp( info->tx_buffer_list[0].virt_addr ,
info->rx_buffer_list[0].virt_addr, FrameSize ) ){
- rc = FALSE;
+ rc = false;
}
}
}
@@ -7445,9 +7439,9 @@ static int mgsl_adapter_test( struct mgsl_struct *info )
* Test the shared memory on a PCI adapter.
*
* Arguments: info pointer to device instance data
- * Return Value: TRUE if test passed, otherwise FALSE
+ * Return Value: true if test passed, otherwise false
*/
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
+static bool mgsl_memory_test( struct mgsl_struct *info )
{
static unsigned long BitPatterns[] =
{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -7457,7 +7451,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
unsigned long * TestAddr;
if ( info->bus_type != MGSL_BUS_TYPE_PCI )
- return TRUE;
+ return true;
TestAddr = (unsigned long *)info->memory_base;
@@ -7466,7 +7460,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
for ( i = 0 ; i < Patterncount ; i++ ) {
*TestAddr = BitPatterns[i];
if ( *TestAddr != BitPatterns[i] )
- return FALSE;
+ return false;
}
/* Test address lines with incrementing pattern over */
@@ -7481,13 +7475,13 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
for ( i = 0 ; i < TestLimit ; i++ ) {
if ( *TestAddr != i * 4 )
- return FALSE;
+ return false;
TestAddr++;
}
memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
- return TRUE;
+ return true;
} /* End Of mgsl_memory_test() */
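
[Note: mgsl_memory_test() is the classic two-phase RAM check — fixed bit patterns at one location exercise the data lines, then an incrementing value across the region catches stuck or shorted address lines. A standalone sketch of the technique; a real device test would go through a volatile or __iomem pointer so each read actually hits the hardware:

    #include <linux/kernel.h>       /* ARRAY_SIZE */
    #include <linux/string.h>       /* memset */
    #include <linux/types.h>

    static bool demo_ram_test(unsigned long *base, unsigned int nwords)
    {
        static const unsigned long patterns[] = {
            0x0, 0x55555555, 0xaaaaaaaa, 0xffffffff,
        };
        unsigned int i;

        /* phase 1: data lines - write/verify patterns at one word */
        for (i = 0; i < ARRAY_SIZE(patterns); i++) {
            base[0] = patterns[i];
            if (base[0] != patterns[i])
                return false;
        }

        /* phase 2: address lines - a unique value in every word */
        for (i = 0; i < nwords; i++)
            base[i] = i * sizeof(*base);
        for (i = 0; i < nwords; i++)
            if (base[i] != i * sizeof(*base))
                return false;

        memset(base, 0, nwords * sizeof(*base));
        return true;
    }
]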
@@ -7604,7 +7598,7 @@ static void mgsl_tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
@@ -7632,7 +7626,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
spin_lock_irqsave(&info->irq_spinlock,flags);
if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
if (info->tx_active)
- info->loopmode_send_done_requested = TRUE;
+ info->loopmode_send_done_requested = true;
else
usc_loopmode_send_done(info);
}
@@ -7646,7 +7640,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
*/
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
- info->loopmode_send_done_requested = FALSE;
+ info->loopmode_send_done_requested = false;
/* clear CMR:13 to 0 to start echoing RxData to TxData */
info->cmr_value &= ~BIT13;
usc_OutReg(info, CMR, info->cmr_value);
@@ -7668,7 +7662,7 @@ static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
*/
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
- info->loopmode_insert_requested = TRUE;
+ info->loopmode_insert_requested = true;
/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
* begin repeating TxData on RxData (complete insertion)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 3c89266c8255..f3d8d72e5ea4 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -117,7 +117,7 @@ static struct pci_driver pci_driver = {
.remove = __devexit_p(remove_one),
};
-static int pci_registered;
+static bool pci_registered;
/*
* module configuration and status
@@ -289,12 +289,12 @@ struct slgt_info {
struct work_struct task;
u32 pending_bh;
- int bh_requested;
- int bh_running;
+ bool bh_requested;
+ bool bh_running;
int isr_overflow;
- int irq_requested; /* nonzero if IRQ requested */
- int irq_occurred; /* for diagnostics use */
+ bool irq_requested; /* true if IRQ requested */
+ bool irq_occurred; /* for diagnostics use */
/* device configuration */
@@ -304,7 +304,7 @@ struct slgt_info {
unsigned char __iomem * reg_addr; /* memory mapped registers address */
u32 phys_reg_addr;
- int reg_addr_requested;
+ bool reg_addr_requested;
MGSL_PARAMS params; /* communications parameters */
u32 idle_mode;
@@ -315,11 +315,11 @@ struct slgt_info {
/* device status */
- int rx_enabled;
- int rx_restart;
+ bool rx_enabled;
+ bool rx_restart;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
unsigned char signals; /* serial signal states */
int init_error; /* initialization error */
@@ -329,7 +329,7 @@ struct slgt_info {
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
int dcd_chkcount; /* check counts to prevent */
@@ -467,8 +467,8 @@ static void rx_start(struct slgt_info *info);
static void reset_rbufs(struct slgt_info *info);
static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
static void rdma_reset(struct slgt_info *info);
-static int rx_get_frame(struct slgt_info *info);
-static int rx_get_buf(struct slgt_info *info);
+static bool rx_get_frame(struct slgt_info *info);
+static bool rx_get_buf(struct slgt_info *info);
static void tx_start(struct slgt_info *info);
static void tx_stop(struct slgt_info *info);
@@ -1968,8 +1968,8 @@ static int bh_action(struct slgt_info *info)
rc = BH_STATUS;
} else {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
rc = 0;
}
@@ -1988,7 +1988,7 @@ static void bh_handler(struct work_struct *work)
if (!info)
return;
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info))) {
switch (action) {
@@ -2158,7 +2158,7 @@ static void isr_serial(struct slgt_info *info)
wr_reg16(info, SSR, status); /* clear pending */
- info->irq_occurred = 1;
+ info->irq_occurred = true;
if (info->params.mode == MGSL_MODE_ASYNC) {
if (status & IRQ_TXIDLE) {
@@ -2225,7 +2225,7 @@ static void isr_rdma(struct slgt_info *info)
if (status & (BIT5 + BIT4)) {
DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
- info->rx_restart = 1;
+ info->rx_restart = true;
}
info->pending_bh |= BH_RECEIVE;
}
@@ -2276,14 +2276,14 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
info->icount.txok++;
}
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = 0;
del_timer(&info->tx_timer);
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
info->signals &= ~SerialSignal_RTS;
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
set_signals(info);
}
@@ -2337,7 +2337,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
- info->irq_occurred = 1;
+ info->irq_occurred = true;
for(i=0; i < info->port_count ; i++) {
if (info->port_array[i] == NULL)
continue;
@@ -2374,7 +2374,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
!port->bh_requested) {
DBGISR(("%s bh queued\n", port->device_name));
schedule_work(&port->task);
- port->bh_requested = 1;
+ port->bh_requested = true;
}
}
@@ -3110,7 +3110,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
DBGINFO(("%s block_til_ready\n", tty->driver->name));
@@ -3122,7 +3123,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3136,7 +3137,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -3321,7 +3322,7 @@ static int claim_resources(struct slgt_info *info)
goto errout;
}
else
- info->reg_addr_requested = 1;
+ info->reg_addr_requested = true;
info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
if (!info->reg_addr) {
@@ -3341,12 +3342,12 @@ static void release_resources(struct slgt_info *info)
{
if (info->irq_requested) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if (info->reg_addr_requested) {
release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
- info->reg_addr_requested = 0;
+ info->reg_addr_requested = false;
}
if (info->reg_addr) {
@@ -3511,7 +3512,7 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
port_array[0]->device_name,
port_array[0]->irq_level));
} else {
- port_array[0]->irq_requested = 1;
+ port_array[0]->irq_requested = true;
adapter_test(port_array[0]);
for (i=1 ; i < port_count ; i++) {
port_array[i]->init_error = port_array[0]->init_error;
@@ -3654,7 +3655,7 @@ static int __init slgt_init(void)
printk("%s pci_register_driver error=%d\n", driver_name, rc);
goto error;
}
- pci_registered = 1;
+ pci_registered = true;
if (!slgt_device_list)
printk("%s no devices found\n",driver_name);
@@ -3812,8 +3813,8 @@ static void rx_stop(struct slgt_info *info)
rdma_reset(info);
- info->rx_enabled = 0;
- info->rx_restart = 0;
+ info->rx_enabled = false;
+ info->rx_restart = false;
}
static void rx_start(struct slgt_info *info)
@@ -3849,8 +3850,8 @@ static void rx_start(struct slgt_info *info)
/* enable receiver */
wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
- info->rx_restart = 0;
- info->rx_enabled = 1;
+ info->rx_restart = false;
+ info->rx_enabled = true;
}
static void tx_start(struct slgt_info *info)
@@ -3858,11 +3859,11 @@ static void tx_start(struct slgt_info *info)
if (!info->tx_enabled) {
wr_reg16(info, TCR,
(unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
- info->tx_enabled = TRUE;
+ info->tx_enabled = true;
}
if (info->tx_count) {
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
@@ -3870,7 +3871,7 @@ static void tx_start(struct slgt_info *info)
if (!(info->signals & SerialSignal_RTS)) {
info->signals |= SerialSignal_RTS;
set_signals(info);
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -3888,7 +3889,7 @@ static void tx_start(struct slgt_info *info)
wr_reg16(info, SSR, IRQ_TXIDLE);
}
tdma_start(info);
- info->tx_active = 1;
+ info->tx_active = true;
}
}
@@ -3949,8 +3950,8 @@ static void tx_stop(struct slgt_info *info)
reset_tbufs(info);
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
static void reset_port(struct slgt_info *info)
@@ -4470,14 +4471,13 @@ static void reset_rbufs(struct slgt_info *info)
/*
* pass receive HDLC frame to upper layer
*
- * return 1 if frame available, otherwise 0
+ * return true if frame available, otherwise false
*/
-static int rx_get_frame(struct slgt_info *info)
+static bool rx_get_frame(struct slgt_info *info)
{
unsigned int start, end;
unsigned short status;
unsigned int framesize = 0;
- int rc = 0;
unsigned long flags;
struct tty_struct *tty = info->tty;
unsigned char addr_field = 0xff;
@@ -4601,23 +4601,23 @@ check_again:
}
}
free_rbufs(info, start, end);
- rc = 1;
+ return true;
cleanup:
- return rc;
+ return false;
}
/*
* pass receive buffer (RAW synchronous mode) to tty layer
- * return 1 if buffer available, otherwise 0
+ * return true if buffer available, otherwise false
*/
-static int rx_get_buf(struct slgt_info *info)
+static bool rx_get_buf(struct slgt_info *info)
{
unsigned int i = info->rbuf_current;
unsigned int count;
if (!desc_complete(info->rbufs[i]))
- return 0;
+ return false;
count = desc_count(info->rbufs[i]);
switch(info->params.mode) {
case MGSL_MODE_MONOSYNC:
@@ -4633,7 +4633,7 @@ static int rx_get_buf(struct slgt_info *info)
ldisc_receive_buf(info->tty, info->rbufs[i].buf,
info->flag_buf, count);
free_rbufs(info, i, i);
- return 1;
+ return true;
}
static void reset_tbufs(struct slgt_info *info)
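
[Note: rx_get_frame() above drops its rc local and returns true directly on the success path, leaving the cleanup label as the shared failure exit. That rewrite is only safe because nothing but the return sits under the label — no unlock or free work. A before/after sketch with hypothetical helpers:

    struct demo_port;                               /* opaque */
    static bool demo_frame_ready(struct demo_port *p); /* hw-specific, omitted */
    static void demo_deliver(struct demo_port *p);     /* hw-specific, omitted */

    /* before: one exit point through a result variable */
    static bool demo_get_frame_old(struct demo_port *p)
    {
        bool rc = false;

        if (!demo_frame_ready(p))
            goto cleanup;
        demo_deliver(p);
        rc = true;
    cleanup:
        return rc;
    }

    /* after: each path returns directly; the label is the failure exit */
    static bool demo_get_frame_new(struct demo_port *p)
    {
        if (!demo_frame_ready(p))
            goto cleanup;
        demo_deliver(p);
        return true;
    cleanup:
        return false;
    }
]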
@@ -4758,7 +4758,7 @@ static int irq_test(struct slgt_info *info)
/* assume failure */
info->init_error = DiagStatus_IrqFailure;
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
spin_unlock_irqrestore(&info->lock, flags);
@@ -4891,7 +4891,7 @@ static void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c96062ea72b4..e98c3e6f8216 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -188,9 +188,9 @@ typedef struct _synclinkmp_info {
u32 pending_bh;
- int bh_running; /* Protection from multiple */
+ bool bh_running; /* Protection from multiple */
int isr_overflow;
- int bh_requested;
+ bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
@@ -213,11 +213,11 @@ typedef struct _synclinkmp_info {
unsigned char *tmp_rx_buf;
unsigned int tmp_rx_buf_count;
- int rx_enabled;
- int rx_overflow;
+ bool rx_enabled;
+ bool rx_overflow;
- int tx_enabled;
- int tx_active;
+ bool tx_enabled;
+ bool tx_active;
u32 idle_mode;
unsigned char ie0_value;
@@ -238,13 +238,13 @@ typedef struct _synclinkmp_info {
unsigned int irq_level; /* interrupt level */
unsigned long irq_flags;
- int irq_requested; /* nonzero if IRQ requested */
+ bool irq_requested; /* true if IRQ requested */
MGSL_PARAMS params; /* communications parameters */
unsigned char serial_signals; /* current serial signal states */
- int irq_occurred; /* for diagnostics use */
+ bool irq_occurred; /* for diagnostics use */
unsigned int init_error; /* Initialization startup error */
u32 last_mem_alloc;
@@ -255,7 +255,7 @@ typedef struct _synclinkmp_info {
unsigned char* sca_base; /* HD64570 SCA Memory address */
u32 phys_sca_base;
u32 sca_offset;
- int sca_base_requested;
+ bool sca_base_requested;
unsigned char* lcr_base; /* local config registers (PCI only) */
u32 phys_lcr_base;
@@ -265,12 +265,12 @@ typedef struct _synclinkmp_info {
unsigned char* statctrl_base; /* status/control register memory */
u32 phys_statctrl_base;
u32 statctrl_offset;
- int sca_statctrl_requested;
+ bool sca_statctrl_requested;
u32 misc_ctrl_value;
char flag_buf[MAX_ASYNC_BUFFER_SIZE];
char char_buf[MAX_ASYNC_BUFFER_SIZE];
- BOOLEAN drop_rts_on_tx_done;
+ bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -571,12 +571,12 @@ static void shutdown(SLMP_INFO *info);
static void program_hw(SLMP_INFO *info);
static void change_params(SLMP_INFO *info);
-static int init_adapter(SLMP_INFO *info);
-static int register_test(SLMP_INFO *info);
-static int irq_test(SLMP_INFO *info);
-static int loopback_test(SLMP_INFO *info);
+static bool init_adapter(SLMP_INFO *info);
+static bool register_test(SLMP_INFO *info);
+static bool irq_test(SLMP_INFO *info);
+static bool loopback_test(SLMP_INFO *info);
static int adapter_test(SLMP_INFO *info);
-static int memory_test(SLMP_INFO *info);
+static bool memory_test(SLMP_INFO *info);
static void reset_adapter(SLMP_INFO *info);
static void reset_port(SLMP_INFO *info);
@@ -587,7 +587,7 @@ static void rx_stop(SLMP_INFO *info);
static void rx_start(SLMP_INFO *info);
static void rx_reset_buffers(SLMP_INFO *info);
static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last);
-static int rx_get_frame(SLMP_INFO *info);
+static bool rx_get_frame(SLMP_INFO *info);
static void tx_start(SLMP_INFO *info);
static void tx_stop(SLMP_INFO *info);
@@ -1473,7 +1473,7 @@ static inline int line_info(char *buf, SLMP_INFO *info)
/* Called to print information about devices
*/
-int read_proc(char *page, char **start, off_t off, int count,
+static int read_proc(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
int len = 0, l;
@@ -2024,7 +2024,7 @@ static void hdlcdev_exit(SLMP_INFO *info)
/* Return next bottom half action to perform.
* Return Value: BH action code or 0 if nothing to do.
*/
-int bh_action(SLMP_INFO *info)
+static int bh_action(SLMP_INFO *info)
{
unsigned long flags;
int rc = 0;
@@ -2044,8 +2044,8 @@ int bh_action(SLMP_INFO *info)
if (!rc) {
/* Mark BH routine as complete */
- info->bh_running = 0;
- info->bh_requested = 0;
+ info->bh_running = false;
+ info->bh_requested = false;
}
spin_unlock_irqrestore(&info->lock,flags);
@@ -2055,7 +2055,7 @@ int bh_action(SLMP_INFO *info)
/* Perform bottom half processing of work items queued by ISR.
*/
-void bh_handler(struct work_struct *work)
+static void bh_handler(struct work_struct *work)
{
SLMP_INFO *info = container_of(work, SLMP_INFO, task);
int action;
@@ -2067,7 +2067,7 @@ void bh_handler(struct work_struct *work)
printk( "%s(%d):%s bh_handler() entry\n",
__FILE__,__LINE__,info->device_name);
- info->bh_running = 1;
+ info->bh_running = true;
while((action = bh_action(info)) != 0) {
@@ -2100,7 +2100,7 @@ void bh_handler(struct work_struct *work)
__FILE__,__LINE__,info->device_name);
}
-void bh_receive(SLMP_INFO *info)
+static void bh_receive(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_receive()\n",
@@ -2109,7 +2109,7 @@ void bh_receive(SLMP_INFO *info)
while( rx_get_frame(info) );
}
-void bh_transmit(SLMP_INFO *info)
+static void bh_transmit(SLMP_INFO *info)
{
struct tty_struct *tty = info->tty;
@@ -2121,7 +2121,7 @@ void bh_transmit(SLMP_INFO *info)
tty_wakeup(tty);
}
-void bh_status(SLMP_INFO *info)
+static void bh_status(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):%s bh_status() entry\n",
@@ -2133,7 +2133,7 @@ void bh_status(SLMP_INFO *info)
info->cts_chkcount = 0;
}
-void isr_timer(SLMP_INFO * info)
+static void isr_timer(SLMP_INFO * info)
{
unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;
@@ -2152,14 +2152,14 @@ void isr_timer(SLMP_INFO * info)
*/
write_reg(info, (unsigned char)(timer + TMCS), 0);
- info->irq_occurred = TRUE;
+ info->irq_occurred = true;
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s isr_timer()\n",
__FILE__,__LINE__,info->device_name);
}
-void isr_rxint(SLMP_INFO * info)
+static void isr_rxint(SLMP_INFO * info)
{
struct tty_struct *tty = info->tty;
struct mgsl_icount *icount = &info->icount;
@@ -2218,7 +2218,7 @@ void isr_rxint(SLMP_INFO * info)
/*
* handle async rx data interrupts
*/
-void isr_rxrdy(SLMP_INFO * info)
+static void isr_rxrdy(SLMP_INFO * info)
{
u16 status;
unsigned char DataByte;
@@ -2232,7 +2232,7 @@ void isr_rxrdy(SLMP_INFO * info)
while((status = read_reg(info,CST0)) & BIT0)
{
int flag = 0;
- int over = 0;
+ bool over = false;
DataByte = read_reg(info,TRB);
icount->rx++;
@@ -2265,7 +2265,7 @@ void isr_rxrdy(SLMP_INFO * info)
* reported immediately, and doesn't
* affect the current character
*/
- over = 1;
+ over = true;
}
}
} /* end of if (error) */
@@ -2318,14 +2318,14 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
info->icount.txok++;
}
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
del_timer(&info->tx_timer);
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) {
info->serial_signals &= ~SerialSignal_RTS;
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
set_signals(info);
}
@@ -2348,7 +2348,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
/*
* handle tx status interrupts
*/
-void isr_txint(SLMP_INFO * info)
+static void isr_txint(SLMP_INFO * info)
{
unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS);
@@ -2376,7 +2376,7 @@ void isr_txint(SLMP_INFO * info)
/*
* handle async tx data interrupts
*/
-void isr_txrdy(SLMP_INFO * info)
+static void isr_txrdy(SLMP_INFO * info)
{
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s isr_txrdy() tx_count=%d\n",
@@ -2398,7 +2398,7 @@ void isr_txrdy(SLMP_INFO * info)
if ( info->tx_count )
tx_load_fifo( info );
else {
- info->tx_active = 0;
+ info->tx_active = false;
info->ie0_value &= ~TXRDYE;
write_reg(info, IE0, info->ie0_value);
}
@@ -2407,7 +2407,7 @@ void isr_txrdy(SLMP_INFO * info)
info->pending_bh |= BH_TRANSMIT;
}
-void isr_rxdmaok(SLMP_INFO * info)
+static void isr_rxdmaok(SLMP_INFO * info)
{
/* BIT7 = EOT (end of transfer)
* BIT6 = EOM (end of message/frame)
@@ -2424,7 +2424,7 @@ void isr_rxdmaok(SLMP_INFO * info)
info->pending_bh |= BH_RECEIVE;
}
-void isr_rxdmaerror(SLMP_INFO * info)
+static void isr_rxdmaerror(SLMP_INFO * info)
{
/* BIT5 = BOF (buffer overflow)
* BIT4 = COF (counter overflow)
@@ -2438,11 +2438,11 @@ void isr_rxdmaerror(SLMP_INFO * info)
printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
__FILE__,__LINE__,info->device_name,status);
- info->rx_overflow = TRUE;
+ info->rx_overflow = true;
info->pending_bh |= BH_RECEIVE;
}
-void isr_txdmaok(SLMP_INFO * info)
+static void isr_txdmaok(SLMP_INFO * info)
{
unsigned char status_reg1 = read_reg(info, SR1);
@@ -2460,7 +2460,7 @@ void isr_txdmaok(SLMP_INFO * info)
write_reg(info, IE0, info->ie0_value);
}
-void isr_txdmaerror(SLMP_INFO * info)
+static void isr_txdmaerror(SLMP_INFO * info)
{
/* BIT5 = BOF (buffer overflow)
* BIT4 = COF (counter overflow)
@@ -2477,7 +2477,7 @@ void isr_txdmaerror(SLMP_INFO * info)
/* handle input serial signal changes
*/
-void isr_io_pin( SLMP_INFO *info, u16 status )
+static void isr_io_pin( SLMP_INFO *info, u16 status )
{
struct mgsl_icount *icount;
@@ -2691,7 +2691,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,port->device_name);
schedule_work(&port->task);
- port->bh_requested = 1;
+ port->bh_requested = true;
}
}
@@ -3320,7 +3320,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
{
DECLARE_WAITQUEUE(wait, current);
int retval;
- int do_clocal = 0, extra_count = 0;
+ bool do_clocal = false;
+ bool extra_count = false;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3335,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
@@ -3353,7 +3354,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
spin_lock_irqsave(&info->lock, flags);
if (!tty_hung_up_p(filp)) {
- extra_count = 1;
+ extra_count = true;
info->count--;
}
spin_unlock_irqrestore(&info->lock, flags);
@@ -3413,7 +3414,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return retval;
}
-int alloc_dma_bufs(SLMP_INFO *info)
+static int alloc_dma_bufs(SLMP_INFO *info)
{
unsigned short BuffersPerFrame;
unsigned short BufferCount;
@@ -3487,7 +3488,7 @@ int alloc_dma_bufs(SLMP_INFO *info)
/* Allocate DMA buffers for the transmit and receive descriptor lists.
*/
-int alloc_buf_list(SLMP_INFO *info)
+static int alloc_buf_list(SLMP_INFO *info)
{
unsigned int i;
@@ -3546,7 +3547,7 @@ int alloc_buf_list(SLMP_INFO *info)
/* Allocate the frame DMA buffers used by the specified buffer list.
*/
-int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
{
int i;
unsigned long phys_addr;
@@ -3563,7 +3564,7 @@ int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,
return 0;
}
-void free_dma_bufs(SLMP_INFO *info)
+static void free_dma_bufs(SLMP_INFO *info)
{
info->buffer_list = NULL;
info->rx_buf_list = NULL;
@@ -3573,7 +3574,7 @@ void free_dma_bufs(SLMP_INFO *info)
/* allocate buffer large enough to hold max_frame_size.
* This buffer is used to pass an assembled frame to the line discipline.
*/
-int alloc_tmp_rx_buf(SLMP_INFO *info)
+static int alloc_tmp_rx_buf(SLMP_INFO *info)
{
info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
if (info->tmp_rx_buf == NULL)
@@ -3581,13 +3582,13 @@ int alloc_tmp_rx_buf(SLMP_INFO *info)
return 0;
}
-void free_tmp_rx_buf(SLMP_INFO *info)
+static void free_tmp_rx_buf(SLMP_INFO *info)
{
kfree(info->tmp_rx_buf);
info->tmp_rx_buf = NULL;
}
-int claim_resources(SLMP_INFO *info)
+static int claim_resources(SLMP_INFO *info)
{
if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
@@ -3596,7 +3597,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->shared_mem_requested = 1;
+ info->shared_mem_requested = true;
if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
@@ -3605,7 +3606,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->lcr_mem_requested = 1;
+ info->lcr_mem_requested = true;
if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
@@ -3614,7 +3615,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->sca_base_requested = 1;
+ info->sca_base_requested = true;
if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
@@ -3623,7 +3624,7 @@ int claim_resources(SLMP_INFO *info)
goto errout;
}
else
- info->sca_statctrl_requested = 1;
+ info->sca_statctrl_requested = true;
info->memory_base = ioremap(info->phys_memory_base,SCA_MEM_SIZE);
if (!info->memory_base) {
@@ -3674,7 +3675,7 @@ errout:
return -ENODEV;
}
-void release_resources(SLMP_INFO *info)
+static void release_resources(SLMP_INFO *info)
{
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):%s release_resources() entry\n",
@@ -3682,24 +3683,24 @@ void release_resources(SLMP_INFO *info)
if ( info->irq_requested ) {
free_irq(info->irq_level, info);
- info->irq_requested = 0;
+ info->irq_requested = false;
}
if ( info->shared_mem_requested ) {
release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
- info->shared_mem_requested = 0;
+ info->shared_mem_requested = false;
}
if ( info->lcr_mem_requested ) {
release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
- info->lcr_mem_requested = 0;
+ info->lcr_mem_requested = false;
}
if ( info->sca_base_requested ) {
release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
- info->sca_base_requested = 0;
+ info->sca_base_requested = false;
}
if ( info->sca_statctrl_requested ) {
release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
- info->sca_statctrl_requested = 0;
+ info->sca_statctrl_requested = false;
}
if (info->memory_base){
@@ -3730,7 +3731,7 @@ void release_resources(SLMP_INFO *info)
/* Add the specified device instance data structure to the
* global linked list of devices and increment the device count.
*/
-void add_device(SLMP_INFO *info)
+static void add_device(SLMP_INFO *info)
{
info->next_device = NULL;
info->line = synclinkmp_device_count;
@@ -3853,7 +3854,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
return info;
}
-void device_init(int adapter_num, struct pci_dev *pdev)
+static void device_init(int adapter_num, struct pci_dev *pdev)
{
SLMP_INFO *port_array[SCA_MAX_PORTS];
int port;
@@ -3902,7 +3903,7 @@ void device_init(int adapter_num, struct pci_dev *pdev)
port_array[0]->irq_level );
}
else {
- port_array[0]->irq_requested = 1;
+ port_array[0]->irq_requested = true;
adapter_test(port_array[0]);
}
}
@@ -4047,7 +4048,7 @@ module_exit(synclinkmp_exit);
* The TxCLK and RxCLK signals are generated from the BRG and
* the TxD is looped back to the RxD internally.
*/
-void enable_loopback(SLMP_INFO *info, int enable)
+static void enable_loopback(SLMP_INFO *info, int enable)
{
if (enable) {
/* MD2 (Mode Register 2)
@@ -4094,7 +4095,7 @@ void enable_loopback(SLMP_INFO *info, int enable)
* data_rate data rate of clock in bits per second
* A data rate of 0 disables the AUX clock.
*/
-void set_rate( SLMP_INFO *info, u32 data_rate )
+static void set_rate( SLMP_INFO *info, u32 data_rate )
{
u32 TMCValue;
unsigned char BRValue;
@@ -4140,7 +4141,7 @@ void set_rate( SLMP_INFO *info, u32 data_rate )
/* Disable receiver
*/
-void rx_stop(SLMP_INFO *info)
+static void rx_stop(SLMP_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s rx_stop()\n",
@@ -4155,13 +4156,13 @@ void rx_stop(SLMP_INFO *info)
write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */
write_reg(info, RXDMA + DIR, 0); /* disable Rx DMA interrupts */
- info->rx_enabled = 0;
- info->rx_overflow = 0;
+ info->rx_enabled = false;
+ info->rx_overflow = false;
}
/* enable the receiver
*/
-void rx_start(SLMP_INFO *info)
+static void rx_start(SLMP_INFO *info)
{
int i;
@@ -4211,14 +4212,14 @@ void rx_start(SLMP_INFO *info)
write_reg(info, CMD, RXENABLE);
- info->rx_overflow = FALSE;
- info->rx_enabled = 1;
+ info->rx_overflow = false;
+ info->rx_enabled = true;
}
/* Enable the transmitter and send a transmit frame if
* one is loaded in the DMA buffers.
*/
-void tx_start(SLMP_INFO *info)
+static void tx_start(SLMP_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s tx_start() tx_count=%d\n",
@@ -4227,7 +4228,7 @@ void tx_start(SLMP_INFO *info)
if (!info->tx_enabled ) {
write_reg(info, CMD, TXRESET);
write_reg(info, CMD, TXENABLE);
- info->tx_enabled = TRUE;
+ info->tx_enabled = true;
}
if ( info->tx_count ) {
@@ -4236,7 +4237,7 @@ void tx_start(SLMP_INFO *info)
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
- info->drop_rts_on_tx_done = 0;
+ info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4245,7 +4246,7 @@ void tx_start(SLMP_INFO *info)
if ( !(info->serial_signals & SerialSignal_RTS) ) {
info->serial_signals |= SerialSignal_RTS;
set_signals( info );
- info->drop_rts_on_tx_done = 1;
+ info->drop_rts_on_tx_done = true;
}
}
@@ -4282,13 +4283,13 @@ void tx_start(SLMP_INFO *info)
write_reg(info, IE0, info->ie0_value);
}
- info->tx_active = 1;
+ info->tx_active = true;
}
}
/* stop the transmitter and DMA
*/
-void tx_stop( SLMP_INFO *info )
+static void tx_stop( SLMP_INFO *info )
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s tx_stop()\n",
@@ -4308,14 +4309,14 @@ void tx_stop( SLMP_INFO *info )
info->ie0_value &= ~TXRDYE;
write_reg(info, IE0, info->ie0_value); /* disable tx data interrupts */
- info->tx_enabled = 0;
- info->tx_active = 0;
+ info->tx_enabled = false;
+ info->tx_active = false;
}
/* Fill the transmit FIFO until the FIFO is full or
* there is no more data to load.
*/
-void tx_load_fifo(SLMP_INFO *info)
+static void tx_load_fifo(SLMP_INFO *info)
{
u8 TwoBytes[2];
@@ -4364,7 +4365,7 @@ void tx_load_fifo(SLMP_INFO *info)
/* Reset a port to a known state
*/
-void reset_port(SLMP_INFO *info)
+static void reset_port(SLMP_INFO *info)
{
if (info->sca_base) {
@@ -4388,7 +4389,7 @@ void reset_port(SLMP_INFO *info)
/* Reset all the ports to a known state.
*/
-void reset_adapter(SLMP_INFO *info)
+static void reset_adapter(SLMP_INFO *info)
{
int i;
@@ -4400,7 +4401,7 @@ void reset_adapter(SLMP_INFO *info)
/* Program port for asynchronous communications.
*/
-void async_mode(SLMP_INFO *info)
+static void async_mode(SLMP_INFO *info)
{
unsigned char RegValue;
@@ -4539,7 +4540,7 @@ void async_mode(SLMP_INFO *info)
/* Program the SCA for HDLC communications.
*/
-void hdlc_mode(SLMP_INFO *info)
+static void hdlc_mode(SLMP_INFO *info)
{
unsigned char RegValue;
u32 DpllDivisor;
@@ -4741,7 +4742,7 @@ void hdlc_mode(SLMP_INFO *info)
/* Set the transmit HDLC idle mode
*/
-void tx_set_idle(SLMP_INFO *info)
+static void tx_set_idle(SLMP_INFO *info)
{
unsigned char RegValue = 0xff;
@@ -4761,7 +4762,7 @@ void tx_set_idle(SLMP_INFO *info)
/* Query the adapter for the state of the V24 status (input) signals.
*/
-void get_signals(SLMP_INFO *info)
+static void get_signals(SLMP_INFO *info)
{
u16 status = read_reg(info, SR3);
u16 gpstatus = read_status_reg(info);
@@ -4790,7 +4791,7 @@ void get_signals(SLMP_INFO *info)
/* Set the state of DTR and RTS based on contents of
* serial_signals member of device context.
*/
-void set_signals(SLMP_INFO *info)
+static void set_signals(SLMP_INFO *info)
{
unsigned char RegValue;
u16 EnableBit;
@@ -4819,7 +4820,7 @@ void set_signals(SLMP_INFO *info)
* and set the current buffer to the first buffer. This effectively
* makes all buffers free and discards any data in buffers.
*/
-void rx_reset_buffers(SLMP_INFO *info)
+static void rx_reset_buffers(SLMP_INFO *info)
{
rx_free_frame_buffers(info, 0, info->rx_buf_count - 1);
}
@@ -4830,16 +4831,16 @@ void rx_reset_buffers(SLMP_INFO *info)
* first index of 1st receive buffer of frame
* last index of last receive buffer of frame
*/
-void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
+static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
{
- int done = 0;
+ bool done = false;
while(!done) {
/* reset current buffer for reuse */
info->rx_buf_list[first].status = 0xff;
if (first == last) {
- done = 1;
+ done = true;
/* set new last rx descriptor address */
write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry);
}
@@ -4856,14 +4857,14 @@ void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int las
/* Return a received frame from the receive DMA buffers.
* Only frames received without errors are returned.
*
- * Return Value: 1 if frame returned, otherwise 0
+ * Return Value: true if frame returned, otherwise false
*/
-int rx_get_frame(SLMP_INFO *info)
+static bool rx_get_frame(SLMP_INFO *info)
{
unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
unsigned short status;
unsigned int framesize = 0;
- int ReturnCode = 0;
+ bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->tty;
unsigned char addr_field = 0xff;
@@ -5014,7 +5015,7 @@ CheckAgain:
/* Free the buffers used by this frame. */
rx_free_frame_buffers( info, StartIndex, EndIndex );
- ReturnCode = 1;
+ ReturnCode = true;
Cleanup:
if ( info->rx_enabled && info->rx_overflow ) {
@@ -5033,7 +5034,7 @@ Cleanup:
/* load the transmit DMA buffer with data
*/
-void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
+static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
{
unsigned short copy_count;
unsigned int i = 0;
@@ -5073,12 +5074,12 @@ void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
info->last_tx_buf = ++i;
}
-int register_test(SLMP_INFO *info)
+static bool register_test(SLMP_INFO *info)
{
static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96};
static unsigned int count = ARRAY_SIZE(testval);
unsigned int i;
- int rc = TRUE;
+ bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
@@ -5101,7 +5102,7 @@ int register_test(SLMP_INFO *info)
(read_reg(info, SA0) != testval[(i+2)%count]) ||
(read_reg(info, SA1) != testval[(i+3)%count]) )
{
- rc = FALSE;
+ rc = false;
break;
}
}
@@ -5112,7 +5113,7 @@ int register_test(SLMP_INFO *info)
return rc;
}
-int irq_test(SLMP_INFO *info)
+static bool irq_test(SLMP_INFO *info)
{
unsigned long timeout;
unsigned long flags;
@@ -5124,7 +5125,7 @@ int irq_test(SLMP_INFO *info)
/* assume failure */
info->init_error = DiagStatus_IrqFailure;
- info->irq_occurred = FALSE;
+ info->irq_occurred = false;
/* setup timer0 on SCA0 to interrupt */
@@ -5163,7 +5164,7 @@ int irq_test(SLMP_INFO *info)
/* initialize individual SCA device (2 ports)
*/
-static int sca_init(SLMP_INFO *info)
+static bool sca_init(SLMP_INFO *info)
{
/* set wait controller to single mem partition (low), no wait states */
write_reg(info, PABR0, 0); /* wait controller addr boundary 0 */
@@ -5199,12 +5200,12 @@ static int sca_init(SLMP_INFO *info)
*/
write_reg(info, ITCR, 0);
- return TRUE;
+ return true;
}
/* initialize adapter hardware
*/
-int init_adapter(SLMP_INFO *info)
+static bool init_adapter(SLMP_INFO *info)
{
int i;
@@ -5257,20 +5258,20 @@ int init_adapter(SLMP_INFO *info)
sca_init(info->port_array[0]);
sca_init(info->port_array[2]);
- return TRUE;
+ return true;
}
/* Loopback an HDLC frame to test the hardware
* interrupt and DMA functions.
*/
-int loopback_test(SLMP_INFO *info)
+static bool loopback_test(SLMP_INFO *info)
{
#define TESTFRAMESIZE 20
unsigned long timeout;
u16 count = TESTFRAMESIZE;
unsigned char buf[TESTFRAMESIZE];
- int rc = FALSE;
+ bool rc = false;
unsigned long flags;
struct tty_struct *oldtty = info->tty;
@@ -5304,16 +5305,16 @@ int loopback_test(SLMP_INFO *info)
msleep_interruptible(10);
if (rx_get_frame(info)) {
- rc = TRUE;
+ rc = true;
break;
}
}
/* verify received frame length and contents */
- if (rc == TRUE &&
- ( info->tmp_rx_buf_count != count ||
- memcmp(buf, info->tmp_rx_buf,count))) {
- rc = FALSE;
+ if (rc &&
+ ( info->tmp_rx_buf_count != count ||
+ memcmp(buf, info->tmp_rx_buf,count))) {
+ rc = false;
}
spin_lock_irqsave(&info->lock,flags);
@@ -5328,7 +5329,7 @@ int loopback_test(SLMP_INFO *info)
/* Perform diagnostics on hardware
*/
-int adapter_test( SLMP_INFO *info )
+static int adapter_test( SLMP_INFO *info )
{
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5390,7 +5391,7 @@ int adapter_test( SLMP_INFO *info )
/* Test the shared memory on a PCI adapter.
*/
-int memory_test(SLMP_INFO *info)
+static bool memory_test(SLMP_INFO *info)
{
static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa,
0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -5404,7 +5405,7 @@ int memory_test(SLMP_INFO *info)
for ( i = 0 ; i < count ; i++ ) {
*addr = testval[i];
if ( *addr != testval[i] )
- return FALSE;
+ return false;
}
/* Test address lines with incrementing pattern over */
@@ -5419,12 +5420,12 @@ int memory_test(SLMP_INFO *info)
for ( i = 0 ; i < limit ; i++ ) {
if ( *addr != i * 4 )
- return FALSE;
+ return false;
addr++;
}
memset( info->memory_base, 0, SCA_MEM_SIZE );
- return TRUE;
+ return true;
}
/* Load data into PCI adapter shared memory.
@@ -5442,7 +5443,7 @@ int memory_test(SLMP_INFO *info)
 * the write transaction. This allows any pending DMA request to gain control
 * of the local bus in a timely fashion.
*/
-void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
+static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
{
/* A load interval of 16 allows for 4 32-bit writes at */
/* 136ns each for a maximum latency of 542ns on the local bus.*/
@@ -5461,7 +5462,7 @@ void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned shor
memcpy(dest, src, count % sca_pci_load_interval);
}
-void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
+static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
{
int i;
int linecount;
@@ -5496,7 +5497,7 @@ void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-void tx_timeout(unsigned long context)
+static void tx_timeout(unsigned long context)
{
SLMP_INFO *info = (SLMP_INFO*)context;
unsigned long flags;
@@ -5508,7 +5509,7 @@ void tx_timeout(unsigned long context)
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = 0;
+ info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
spin_unlock_irqrestore(&info->lock,flags);
@@ -5523,7 +5524,7 @@ void tx_timeout(unsigned long context)
/* called to periodically check the DSR/RI modem signal input status
*/
-void status_timeout(unsigned long context)
+static void status_timeout(unsigned long context)
{
u16 status = 0;
SLMP_INFO *info = (SLMP_INFO*)context;
@@ -5574,36 +5575,36 @@ void status_timeout(unsigned long context)
}
-unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
+static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
{
CALC_REGADDR();
return *RegAddr;
}
-void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
+static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
{
CALC_REGADDR();
*RegAddr = Value;
}
-u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
+static u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
{
CALC_REGADDR();
return *((u16 *)RegAddr);
}
-void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
+static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
{
CALC_REGADDR();
*((u16 *)RegAddr) = Value;
}
-unsigned char read_status_reg(SLMP_INFO * info)
+static unsigned char read_status_reg(SLMP_INFO * info)
{
unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
return *RegAddr;
}
-void write_control_reg(SLMP_INFO * info)
+static void write_control_reg(SLMP_INFO * info)
{
unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
*RegAddr = info->port_array[0]->ctrlreg_value;
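
[Note: alongside the bool conversion, every file-local helper in synclinkmp.c gains static linkage, which keeps generic names like read_reg and bh_handler out of the kernel's global namespace and lets the compiler see every call site. The shape of the change, taken from the declarations above:

    /* before: implicit external linkage, visible image-wide */
    int bh_action(SLMP_INFO *info);
    void bh_handler(struct work_struct *work);

    /* after: confined to this translation unit */
    static int bh_action(SLMP_INFO *info);
    static void bh_handler(struct work_struct *work);
]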
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index de60e1ea4fb3..1ade193c9128 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -271,8 +271,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
- GFP_KERNEL, 0);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback);
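
[Note: node_zonelist() derives the right zonelist from a node id plus the allocation's GFP flags, replacing the old direct index into node_zonelists[] by zone number, which no longer matched the reorganized zonelist layout. A caller-side sketch, assuming the usual helpers from <linux/gfp.h>:

    #include <linux/gfp.h>          /* node_zonelist */
    #include <linux/topology.h>     /* numa_node_id */

    static struct zonelist *demo_pick_zonelist(void)
    {
        /* the same list the page allocator would walk for GFP_KERNEL */
        return node_zonelist(numa_node_id(), GFP_KERNEL);
    }
]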
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 9b58b894f823..df4c3ead9e2b 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2054,6 +2054,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
unsigned long draw_from = 0, draw_to = 0;
struct vc_data *vc;
unsigned char vc_attr;
+ struct vt_notifier_param param;
uint8_t rescan;
uint8_t inverse;
uint8_t width;
@@ -2113,6 +2114,8 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
if (IS_FG(vc))
hide_cursor(vc);
+ param.vc = vc;
+
while (!tty->stopped && count) {
int orig = *buf;
c = orig;
@@ -2201,6 +2204,11 @@ rescan_last_byte:
tc = vc->vc_translate[vc->vc_toggle_meta ? (c | 0x80) : c];
}
+ param.c = tc;
+ if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
+ &param) == NOTIFY_STOP)
+ continue;
+
/* If the original code was a control character we
* only allow a glyph to be displayed if the code is
* not normally used (such as for cursor movement) or
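
[Note: with VT_PREWRITE in place, every character headed for the console is offered to the vt notifier chain before being drawn, and a listener that returns NOTIFY_STOP swallows it. A hedged consumer sketch — the BEL filter is invented, and registration is assumed to go through register_vt_notifier():

    #include <linux/notifier.h>
    #include <linux/vt_kern.h>

    static int demo_vt_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
        struct vt_notifier_param *param = data;

        if (action == VT_PREWRITE && param->c == 0x07)
            return NOTIFY_STOP;     /* silently eat BEL */
        return NOTIFY_OK;
    }

    static struct notifier_block demo_vt_nb = {
        .notifier_call = demo_vt_notify,
    };

    /* module init: register_vt_notifier(&demo_vt_nb); */
]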
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d8db2f8ee411..24c62b848bf9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -43,6 +43,7 @@ struct gpio_desc {
/* flag symbols are bit numbers */
#define FLAG_REQUESTED 0
#define FLAG_IS_OUT 1
+#define FLAG_RESERVED 2
#ifdef CONFIG_DEBUG_FS
const char *label;
@@ -68,6 +69,9 @@ static void gpio_ensure_requested(struct gpio_desc *desc)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
pr_warning("GPIO-%d autorequested\n", (int)(desc - gpio_desc));
desc_set_label(desc, "[auto]");
+ if (!try_module_get(desc->chip->owner))
+ pr_err("GPIO-%d: module can't be gotten\n",
+ (int)(desc - gpio_desc));
}
}
@@ -77,6 +81,76 @@ static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
return gpio_desc[gpio].chip;
}
+/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
+static int gpiochip_find_base(int ngpio)
+{
+ int i;
+ int spare = 0;
+ int base = -ENOSPC;
+
+ for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) {
+ struct gpio_desc *desc = &gpio_desc[i];
+ struct gpio_chip *chip = desc->chip;
+
+ if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) {
+ spare++;
+ if (spare == ngpio) {
+ base = i;
+ break;
+ }
+ } else {
+ spare = 0;
+ if (chip)
+ i -= chip->ngpio - 1;
+ }
+ }
+
+ if (gpio_is_valid(base))
+ pr_debug("%s: found new base at %d\n", __func__, base);
+ return base;
+}
+
+/**
+ * gpiochip_reserve() - reserve range of gpios to use with platform code only
+ * @start: starting gpio number
+ * @ngpio: number of gpios to reserve
+ * Context: platform init, potentially before irqs or kmalloc will work
+ *
+ * Returns a negative errno if any gpio within the range is already reserved
+ * or registered, else returns zero as a success code. Use this function
+ * to mark a range of gpios as unavailable for dynamic gpio number allocation,
+ * for example because its driver support is not yet loaded.
+ */
+int __init gpiochip_reserve(int start, int ngpio)
+{
+ int ret = 0;
+ unsigned long flags;
+ int i;
+
+ if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio))
+ return -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ for (i = start; i < start + ngpio; i++) {
+ struct gpio_desc *desc = &gpio_desc[i];
+
+ if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ set_bit(FLAG_RESERVED, &desc->flags);
+ }
+
+ pr_debug("%s: reserved gpios from %d to %d\n",
+ __func__, start, start + ngpio - 1);
+err:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ return ret;
+}
+
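
[Note: per the kerneldoc above, gpiochip_reserve() is for early platform code — it fences a range off from gpiochip_find_base() until the driver that really owns those lines loads. A usage sketch; the range is invented:

    /* board setup: gpios 80..95 belong to an expander probed later */
    static int __init demo_board_reserve(void)
    {
        return gpiochip_reserve(80, 16);
    }
]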
/**
* gpiochip_add() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
@@ -85,38 +159,49 @@ static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
* Returns a negative errno if the chip can't be registered, such as
* because the chip->base is invalid or already associated with a
* different chip. Otherwise it returns zero as a success code.
+ *
+ * If chip->base is negative, this requests dynamic assignment of
+ * a range of valid GPIOs.
*/
int gpiochip_add(struct gpio_chip *chip)
{
unsigned long flags;
int status = 0;
unsigned id;
+ int base = chip->base;
- /* NOTE chip->base negative is reserved to mean a request for
- * dynamic allocation. We don't currently support that.
- */
-
- if (chip->base < 0 || (chip->base + chip->ngpio) >= ARCH_NR_GPIOS) {
+ if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio))
+ && base >= 0) {
status = -EINVAL;
goto fail;
}
spin_lock_irqsave(&gpio_lock, flags);
+ if (base < 0) {
+ base = gpiochip_find_base(chip->ngpio);
+ if (base < 0) {
+ status = base;
+ goto fail_unlock;
+ }
+ chip->base = base;
+ }
+
/* these GPIO numbers must not be managed by another gpio_chip */
- for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ for (id = base; id < base + chip->ngpio; id++) {
if (gpio_desc[id].chip != NULL) {
status = -EBUSY;
break;
}
}
if (status == 0) {
- for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ for (id = base; id < base + chip->ngpio; id++) {
gpio_desc[id].chip = chip;
gpio_desc[id].flags = 0;
}
}
+fail_unlock:
spin_unlock_irqrestore(&gpio_lock, flags);
fail:
/* failures here can mean systems won't boot... */
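
[Note: after this hunk a driver may leave the base choice to gpiolib by passing a negative chip->base; on success gpiochip_add() writes the assigned base back into the chip. A registration sketch — label and count invented, and the ops a working chip needs are omitted:

    static struct gpio_chip demo_chip = {
        .label = "demo-expander",
        .ngpio = 8,
        .base  = -1,        /* ask gpiolib to find a free range */
    };

    static int demo_register(void)
    {
        int status = gpiochip_add(&demo_chip);

        if (status == 0)
            pr_debug("demo gpios at %d..%d\n", demo_chip.base,
                     demo_chip.base + demo_chip.ngpio - 1);
        return status;
    }
]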
@@ -171,12 +256,15 @@ int gpio_request(unsigned gpio, const char *label)
spin_lock_irqsave(&gpio_lock, flags);
- if (gpio >= ARCH_NR_GPIOS)
+ if (!gpio_is_valid(gpio))
goto done;
desc = &gpio_desc[gpio];
if (desc->chip == NULL)
goto done;
+ if (!try_module_get(desc->chip->owner))
+ goto done;
+
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled.
*/
@@ -184,8 +272,10 @@ int gpio_request(unsigned gpio, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
desc_set_label(desc, label ? : "?");
status = 0;
- } else
+ } else {
status = -EBUSY;
+ module_put(desc->chip->owner);
+ }
done:
if (status)
@@ -201,7 +291,7 @@ void gpio_free(unsigned gpio)
unsigned long flags;
struct gpio_desc *desc;
- if (gpio >= ARCH_NR_GPIOS) {
+ if (!gpio_is_valid(gpio)) {
WARN_ON(extra_checks);
return;
}
@@ -209,9 +299,10 @@ void gpio_free(unsigned gpio)
spin_lock_irqsave(&gpio_lock, flags);
desc = &gpio_desc[gpio];
- if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags))
+ if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags)) {
desc_set_label(desc, NULL);
- else
+ module_put(desc->chip->owner);
+ } else
WARN_ON(extra_checks);
spin_unlock_irqrestore(&gpio_lock, flags);
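
[Note: gpio_request() now takes a reference on the chip's owner module and gpio_free() drops it, so a provider can't be unloaded while one of its lines is claimed. From a consumer the pairing is invisible; a sketch with an invented LED helper:

    #include <linux/gpio.h>

    static int demo_led_attach(unsigned gpio)
    {
        int err = gpio_request(gpio, "demo-led");   /* pins chip->owner */

        if (err)
            return err;
        err = gpio_direction_output(gpio, 1);
        if (err)
            gpio_free(gpio);    /* also drops the module reference */
        return err;
    }
]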
@@ -236,7 +327,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
{
unsigned gpio = chip->base + offset;
- if (gpio >= ARCH_NR_GPIOS || gpio_desc[gpio].chip != chip)
+ if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip)
return NULL;
if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
return NULL;
@@ -267,7 +358,7 @@ int gpio_direction_input(unsigned gpio)
spin_lock_irqsave(&gpio_lock, flags);
- if (gpio >= ARCH_NR_GPIOS)
+ if (!gpio_is_valid(gpio))
goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
@@ -305,7 +396,7 @@ int gpio_direction_output(unsigned gpio, int value)
spin_lock_irqsave(&gpio_lock, flags);
- if (gpio >= ARCH_NR_GPIOS)
+ if (!gpio_is_valid(gpio))
goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
@@ -522,7 +613,7 @@ static int gpiolib_show(struct seq_file *s, void *unused)
/* REVISIT this isn't locked against gpio_chip removal ... */
- for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
+ for (gpio = 0; gpio_is_valid(gpio); gpio++) {
if (chip == gpio_desc[gpio].chip)
continue;
chip = gpio_desc[gpio].chip;
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index bb60e8c1a1f0..7fb5b9d009d4 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -239,6 +239,7 @@ static int mcp23s08_probe(struct spi_device *spi)
mcp->chip.base = pdata->base;
mcp->chip.ngpio = 8;
mcp->chip.can_sleep = 1;
+ mcp->chip.owner = THIS_MODULE;
spi_set_drvdata(spi, mcp);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 6e72fd31184d..e0e0af536108 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -189,6 +189,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->base = chip->gpio_start;
gc->ngpio = gpios;
gc->label = chip->client->name;
+ gc->owner = THIS_MODULE;
}
static int __devinit pca953x_probe(struct i2c_client *client)
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index c6b3b5378384..1106aa15ac79 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -159,6 +159,7 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.base = pdata->gpio_base;
gpio->chip.can_sleep = 1;
+ gpio->chip.owner = THIS_MODULE;
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index d158f579bde2..713cef20622e 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -35,12 +35,12 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
base += BAST_IDE_CS;
aux += BAST_IDE_CS;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw.io_ports[i] = (unsigned long)base;
+ for (i = 0; i <= 7; i++) {
+ hw.io_ports_array[i] = (unsigned long)base;
base += 0x20;
}
- hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+ hw.io_ports.ctl_addr = aux + (6 * 0x20);
hw.irq = irq;
hwif = ide_find_port();
@@ -49,11 +49,7 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
hwif->port_ops = NULL;
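
[Note: the IDE hunks swap indexed io_ports[IDE_*_OFFSET] accesses for named members overlaid on an array, so sequential setup loops keep working while one-off registers get self-describing names. A sketch of the layout this assumes — the real definition lives in <linux/ide.h>, so treat the field order here as illustrative:

    #define DEMO_NR_PORTS 10

    struct demo_io_ports {
        unsigned long data_addr;    /* io_ports_array[0] */
        unsigned long error_addr;
        unsigned long nsect_addr;
        unsigned long lbal_addr;
        unsigned long lbam_addr;
        unsigned long lbah_addr;
        unsigned long device_addr;
        unsigned long status_addr;  /* io_ports_array[7] */
        unsigned long ctl_addr;     /* io_ports_array[8] */
        unsigned long irq_addr;
    };

    struct demo_hw_regs {
        union {                     /* both views alias the same storage */
            struct demo_io_ports io_ports;
            unsigned long io_ports_array[DEMO_NR_PORTS];
        };
        int irq;
    };

    static void demo_setup(struct demo_hw_regs *hw, unsigned long base,
                           unsigned long aux)
    {
        int i;

        for (i = 0; i <= 7; i++) {  /* data through status */
            hw->io_ports_array[i] = base;
            base += 0x20;
        }
        hw->io_ports.ctl_addr = aux + 6 * 0x20;
    }
]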
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 7d642f44e35b..124445c20921 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -426,11 +426,12 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
*/
default_hwif_mmiops(hwif);
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hwif->io_ports[i] = port;
+ for (i = 0; i <= 7; i++) {
+ hwif->io_ports_array[i] = port;
port += 1 << info->stepping;
}
- hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
+ hwif->io_ports.ctl_addr =
+ (unsigned long)base + info->ctrloffset;
hwif->irq = ec->irq;
hwif->chipset = ide_acorn;
hwif->gendev.parent = &ec->dev;
@@ -480,8 +481,7 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_AUTOTUNE,
+ .host_flags = IDE_HFLAG_SERIALIZE,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
};
@@ -547,14 +547,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
hwif->config_data = (unsigned long)ioc_base;
hwif->select_data = sel;
- mate->maskproc = icside_maskproc;
mate->hwif_data = state;
mate->config_data = (unsigned long)ioc_base;
mate->select_data = sel | 1;
if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
d.init_dma = icside_dma_init;
- d.port_ops = &icside_v6_dma_port_ops;
+ d.port_ops = &icside_v6_port_ops;
d.dma_ops = NULL;
}
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 8fa34e26443a..aaf32541622d 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -321,7 +321,7 @@ static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
unsigned long base =
- hwif->io_ports[IDE_DATA_OFFSET] - IDE_PALM_ATA_PRI_REG_OFFSET;
+ hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
@@ -386,8 +386,8 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
for (i = 0; i < IDE_NR_PORTS - 2; i++)
- hw.io_ports[i] = pribase + i;
- hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+ hw.io_ports_array[i] = pribase + i;
+ hw.io_ports.ctl_addr = mem->start +
IDE_PALM_ATA_PRI_CTL_OFFSET;
hw.irq = irq->start;
hw.chipset = ide_palm3710;
@@ -398,11 +398,7 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
hwif->mmio = 1;
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index c0581bd98d0d..babc1a5e128d 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
unsigned long port = (unsigned long)base;
int i;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = port;
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = port;
port += sz;
}
- hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+ hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
}
@@ -75,7 +75,7 @@ static void __devexit rapide_remove(struct expansion_card *ec)
ecard_set_drvdata(ec, NULL);
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
ecard_release_resources(ec);
}
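rapide_remove() also picks up the companion interface change: ide_unregister() now takes the ide_hwif_t pointer directly instead of an index, and the probe paths above (bast-ide, palm_bk3710) drop the unregister-if-present dance because a port returned by ide_find_port() is expected to be free. Assuming the new prototype is void ide_unregister(ide_hwif_t *hwif), a hot-unplug path reduces to:

	static void __devexit example_remove(struct expansion_card *ec)
	{
		ide_hwif_t *hwif = ecard_get_drvdata(ec);

		ecard_set_drvdata(ec, NULL);
		ide_unregister(hwif);	/* was: ide_unregister(hwif->index) */
		ecard_release_resources(ec);
	}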
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index a62ca75c7e28..9df26855bc05 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -88,8 +88,8 @@ enum /* Transfer types */
int
cris_ide_ack_intr(ide_hwif_t* hwif)
{
- reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
- int, hwif->io_ports[0]);
+ reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
+ hwif->io_ports.data_addr);
REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
return 1;
}
@@ -231,7 +231,7 @@ cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,
ide_hwif_t *hwif = drive->hwif;
reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->io_ports.data_addr);
reg_ata_rw_trf_cnt trf_cnt = {0};
mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
@@ -271,7 +271,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
int intr = REG_RD_INT(ata, regi_ata, r_intr);
reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->io_ports.data_addr);
return intr & (1 << ctrl2.sel) ? 1 : 0;
}
@@ -531,7 +531,7 @@ static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int d
*R_ATA_CTRL_DATA =
cmd |
IO_FIELD(R_ATA_CTRL_DATA, data,
- drive->hwif->io_ports[IDE_DATA_OFFSET]) |
+ drive->hwif->io_ports.data_addr) |
IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
IO_STATE(R_ATA_CTRL_DATA, multi, on) |
IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
@@ -550,7 +550,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
{
int intr = *R_IRQ_MASK0_RD;
int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
- drive->hwif->io_ports[IDE_DATA_OFFSET]);
+ drive->hwif->io_ports.data_addr);
return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
}
@@ -644,7 +644,7 @@ cris_ide_inw(unsigned long reg) {
* call will also timeout on busy, but as long as the
* write is still performed, everything will be fine.
*/
- if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET)
+ if (cris_ide_get_reg(reg) == 7)
return BUSY_STAT;
else
/* For other rare cases we assume 0 is good enough. */
@@ -765,13 +765,13 @@ static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
memset(hw, 0, sizeof(*hw));
for (i = 0; i <= 7; i++)
- hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
+ hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
/*
* the IDE control register is at ATA address 6,
* with CS1 active instead of CS0
*/
- hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
+ hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
hw->irq = ide_default_irq(0);
hw->ack_intr = cris_ide_ack_intr;
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 0708b29cdb17..fd23f12e17aa 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -63,9 +63,9 @@ static inline void hw_setup(hw_regs_t *hw)
int i;
memset(hw, 0, sizeof(hw_regs_t));
- for (i = 0; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
- hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
+ hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
hw->chipset = ide_generic;
}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index e4ad26e4fce7..9d3601fa5680 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -60,9 +60,17 @@ struct ide_acpi_hwif_link {
#define DEBPRINT(fmt, args...) do {} while (0)
#endif /* DEBUGGING */
-extern int ide_noacpi;
-extern int ide_noacpitfs;
-extern int ide_noacpionboot;
+int ide_noacpi;
+module_param_named(noacpi, ide_noacpi, bool, 0);
+MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
+
+int ide_acpigtf;
+module_param_named(acpigtf, ide_acpigtf, bool, 0);
+MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
+
+int ide_acpionboot;
+module_param_named(acpionboot, ide_acpionboot, bool, 0);
+MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
static bool ide_noacpi_psx;
static int no_acpi_psx(const struct dmi_system_id *id)
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
memcpy(&args.tf_array[7], &gtf->tfa, 7);
args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- if (ide_noacpitfs) {
+ if (!ide_acpigtf) {
DEBPRINT("_GTF execution disabled\n");
return err;
}
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (ide_noacpionboot) {
+ if (!ide_acpionboot) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
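The ACPI knobs stop being externs defined in the core and become module parameters defined next to their users. Note the polarity flip: the negative ide_noacpitfs and ide_noacpionboot become the positive ide_acpigtf and ide_acpionboot, which is why the call sites now test !ide_acpigtf and !ide_acpionboot, and which also flips the default so that _GTF execution and boot-time ACPI method calls are off unless explicitly enabled. The declaration pattern, generically (hypothetical names):

	#include <linux/module.h>

	static int example_enable;	/* defaults to 0: feature off */
	module_param_named(enable, example_enable, bool, 0);
	MODULE_PARM_DESC(enable, "enable the example feature");

	static void example_path(void)
	{
		if (!example_enable)
			return;		/* user did not opt in */
		/* ... do the optional work ... */
	}

With ide-acpi linked into the ide-core module (an assumption about the build layout), the new knobs would then be set as ide-core module options, e.g. acpigtf=1.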
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ad984322da94..b34fd2bde96f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -560,7 +560,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
/* packet command */
spin_lock_irqsave(&ide_lock, flags);
hwif->OUTBSYNC(drive, WIN_PACKETCMD,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->io_ports.command_addr);
ndelay(400);
spin_unlock_irqrestore(&ide_lock, flags);
@@ -952,9 +952,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
/* ok we fall to pio :/ */
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3;
- lowcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
- highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
+ lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
+ highcyl = hwif->INB(hwif->io_ports.lbah_addr);
len = lowcyl + (256 * highcyl);
@@ -1909,9 +1909,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
/* set correct block size */
blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
- if (drive->autotune == IDE_TUNE_DEFAULT ||
- drive->autotune == IDE_TUNE_AUTO)
- drive->dsc_overlap = (drive->next != drive);
+ drive->dsc_overlap = (drive->next != drive);
if (ide_cdrom_register(drive, nslots)) {
printk(KERN_ERR "%s: %s failed to register device with the"
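cdrom_newpc_intr() above shows the ATAPI PIO bookkeeping that the named fields make legible: the interrupt reason lives in the sector-count register and the byte count in the LBA-mid/high pair. Decoding a data-phase interrupt looks roughly like this inside the handler (the CD and IO bit positions are assumptions matching common ATAPI definitions):

	u8 ireason, lowcyl, highcyl;
	int len;

	ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
	lowcyl  = hwif->INB(hwif->io_ports.lbam_addr);
	highcyl = hwif->INB(hwif->io_ports.lbah_addr);
	len = lowcyl + (256 * highcyl);	/* bytes the device will transfer */

	if (ireason & 0x1) {
		/* CD set: device expects command/packet bytes, not data */
	} else if (ireason & 0x2) {
		/* IO set: data phase, device to host; read len bytes */
	} else {
		/* data phase, host to device; write len bytes */
	}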
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 6e891bccd052..489079b8ed03 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -465,10 +465,10 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
}
/* Get the number of bytes to transfer */
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
/* on this interrupt */
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -539,7 +539,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
"issuing a packet command\n");
@@ -586,7 +586,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
"while issuing a packet command\n");
@@ -692,7 +692,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
return ide_started;
} else {
/* Issue the packet command */
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return (*pkt_xfer_routine) (drive);
}
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0fe89a599275..3a2d8930d17f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -298,48 +298,43 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
struct ide_taskfile *tf = &task->tf;
if (task->tf_flags & IDE_TFLAG_IN_DATA) {
- u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);
+ u16 data = hwif->INW(io_ports->data_addr);
tf->data = data & 0xff;
tf->hob_data = (data >> 8) & 0xff;
}
/* be sure we're looking at the low order bits */
- hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
- tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ tf->nsect = hwif->INB(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
- tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
+ tf->lbal = hwif->INB(io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAM)
- tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
+ tf->lbam = hwif->INB(io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAH)
- tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
+ tf->lbah = hwif->INB(io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
- tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);
+ tf->device = hwif->INB(io_ports->device_addr);
if (task->tf_flags & IDE_TFLAG_LBA48) {
- hwif->OUTB(drive->ctl | 0x80,
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
- tf->hob_feature =
- hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
+ tf->hob_feature = hwif->INB(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
- tf->hob_nsect =
- hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
- tf->hob_lbal =
- hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
+ tf->hob_lbal = hwif->INB(io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
- tf->hob_lbam =
- hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
+ tf->hob_lbam = hwif->INB(io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
- tf->hob_lbah =
- hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
+ tf->hob_lbah = hwif->INB(io_ports->lbah_addr);
}
}
@@ -454,7 +449,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
if (err == ABRT_ERR) {
if (drive->select.b.lba &&
/* some newer drives don't support WIN_SPECIFY */
- hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
+ hwif->INB(hwif->io_ports.command_addr) ==
WIN_SPECIFY)
return ide_stopped;
} else if ((err & BAD_CRC) == BAD_CRC) {
@@ -507,8 +502,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
if (rq->errors >= ERROR_MAX) {
ide_kill_rq(drive, rq);
@@ -1421,7 +1415,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
*/
do {
if (hwif->irq == irq) {
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime, count;
@@ -1511,7 +1505,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
* Whack the status register, just in case
* we have a leftover pending IRQ.
*/
- (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ (void) hwif->INB(hwif->io_ports.status_addr);
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&ide_lock, flags);
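ide_tf_read() is also the clearest illustration of the LBA48 register convention the new names expose: the same taskfile registers are read twice, with bit 7 (HOB, high order byte) of the device-control register selecting which latched byte appears. Trimmed to two fields (flag handling elided; names as in the hunk):

	struct ide_io_ports *io_ports = &hwif->io_ports;

	/* low-order bytes: clear HOB in the control register, then read */
	hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
	tf->nsect = hwif->INB(io_ports->nsect_addr);
	tf->lbal  = hwif->INB(io_ports->lbal_addr);

	/* high-order bytes (LBA48 only): set HOB, read the same registers */
	hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
	tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
	tf->hob_lbal  = hwif->INB(io_ports->lbal_addr);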
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 9c646bd63549..5425d3038ec2 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -164,7 +164,7 @@ void SELECT_DRIVE (ide_drive_t *drive)
if (port_ops && port_ops->selectproc)
port_ops->selectproc(drive);
- hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]);
+ hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
}
void SELECT_MASK (ide_drive_t *drive, int mask)
@@ -194,24 +194,22 @@ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
*/
static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 io_32bit = drive->io_32bit;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ u8 io_32bit = drive->io_32bit;
if (io_32bit) {
if (io_32bit & 2) {
unsigned long flags;
local_irq_save(flags);
- ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ ata_vlb_sync(drive, io_ports->nsect_addr);
+ hwif->INSL(io_ports->data_addr, buffer, wcount);
local_irq_restore(flags);
} else
- hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ hwif->INSL(io_ports->data_addr, buffer, wcount);
} else
- hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount << 1);
+ hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
}
/*
@@ -219,24 +217,22 @@ static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
*/
static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 io_32bit = drive->io_32bit;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ u8 io_32bit = drive->io_32bit;
if (io_32bit) {
if (io_32bit & 2) {
unsigned long flags;
local_irq_save(flags);
- ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ ata_vlb_sync(drive, io_ports->nsect_addr);
+ hwif->OUTSL(io_ports->data_addr, buffer, wcount);
local_irq_restore(flags);
} else
- hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ hwif->OUTSL(io_ports->data_addr, buffer, wcount);
} else
- hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount << 1);
+ hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
}
/*
@@ -255,14 +251,13 @@ static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
if (MACH_IS_ATARI || MACH_IS_Q40) {
/* Atari has a byte-swapped IDE interface */
- insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- bytecount / 2);
+ insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
return;
}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
hwif->ata_input_data(drive, buffer, bytecount / 4);
if ((bytecount & 0x03) >= 2)
- hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET],
+ hwif->INSW(hwif->io_ports.data_addr,
(u8 *)buffer + (bytecount & ~0x03), 1);
}
@@ -274,14 +269,13 @@ static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
if (MACH_IS_ATARI || MACH_IS_Q40) {
/* Atari has a byte-swapped IDE interface */
- outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- bytecount / 2);
+ outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
return;
}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
hwif->ata_output_data(drive, buffer, bytecount / 4);
if ((bytecount & 0x03) >= 2)
- hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET],
+ hwif->OUTSW(hwif->io_ports.data_addr,
(u8 *)buffer + (bytecount & ~0x03), 1);
}
@@ -445,7 +439,7 @@ int drive_is_ready (ide_drive_t *drive)
* an interrupt with another pci card/device. We make no assumptions
* about possible isa-pnp and pci-pnp issues yet.
*/
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
+ if (hwif->io_ports.ctl_addr)
stat = ide_read_altstatus(drive);
else
/* Note: this may clear a pending IRQ!! */
@@ -647,7 +641,7 @@ int ide_driveid_update(ide_drive_t *drive)
SELECT_MASK(drive, 1);
ide_set_irq(drive, 1);
msleep(50);
- hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
timeout = jiffies + WAIT_WORSTCASE;
do {
if (time_after(jiffies, timeout)) {
@@ -696,6 +690,7 @@ int ide_driveid_update(ide_drive_t *drive)
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int error = 0;
u8 stat;
@@ -734,10 +729,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
SELECT_MASK(drive, 0);
udelay(1);
ide_set_irq(drive, 0);
- hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]);
- hwif->OUTBSYNC(drive, WIN_SETFEATURES,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(speed, io_ports->nsect_addr);
+ hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
+ hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
if (drive->quirk_list == 2)
ide_set_irq(drive, 1);
@@ -845,7 +839,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
spin_lock_irqsave(&ide_lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
- hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
/*
* Drive takes 400nS to respond, we must avoid the IRQ being
* serviced before that.
@@ -1029,6 +1023,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
unsigned long flags;
ide_hwif_t *hwif;
ide_hwgroup_t *hwgroup;
+ struct ide_io_ports *io_ports;
const struct ide_port_ops *port_ops;
u8 ctl;
@@ -1036,6 +1031,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
hwif = HWIF(drive);
hwgroup = HWGROUP(drive);
+ io_ports = &hwif->io_ports;
+
/* We must not reset with running handlers */
BUG_ON(hwgroup->handler != NULL);
@@ -1045,8 +1042,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
pre_reset(drive);
SELECT_DRIVE(drive);
udelay (20);
- hwif->OUTBSYNC(drive, WIN_SRST,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
ndelay(400);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
hwgroup->polling = 1;
@@ -1062,7 +1058,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
for (unit = 0; unit < MAX_DRIVES; ++unit)
pre_reset(&hwif->drives[unit]);
- if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) {
+ if (io_ports->ctl_addr == 0) {
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
}
@@ -1077,14 +1073,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
* recover from reset very quickly, saving us the first 50ms wait time.
*/
/* set SRST and nIEN */
- hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
/* more than enough time */
udelay(10);
if (drive->quirk_list == 2)
ctl = drive->ctl; /* clear SRST and nIEN */
else
ctl = drive->ctl | 2; /* clear SRST, leave nIEN */
- hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
/* more than enough time */
udelay(10);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1129,7 +1125,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
* about locking issues (2.5 work ?).
*/
mdelay(1);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if ((stat & BUSY_STAT) == 0)
return 0;
/*
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 10c20e9a5785..6a8953f68e9f 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -82,10 +82,7 @@ static void idepnp_remove(struct pnp_dev *dev)
{
ide_hwif_t *hwif = pnp_get_drvdata(dev);
- if (hwif)
- ide_unregister(hwif->index);
- else
- printk(KERN_ERR "idepnp: Unable to remove device, please report.\n");
+ ide_unregister(hwif);
release_region(pnp_port_start(dev, 1), 1);
release_region(pnp_port_start(dev, 0), 8);
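For reference, the do_reset1() hunks in ide-iops.c above are the standard ATA software reset sequence, now written against io_ports->ctl_addr: assert SRST (bit 2) together with nIEN (bit 1) in the device-control register, hold it, deassert SRST, then poll until BSY clears. The register writes, reduced (timings as in the hunk; the spec minimum hold time is on the order of 5 us):

	/* set SRST and nIEN: 0x04 | 0x02 == 6 */
	hwif->OUTBSYNC(drive, drive->ctl | 6, io_ports->ctl_addr);
	udelay(10);
	/* clear SRST; quirk_list drives also get nIEN cleared (ctl alone) */
	hwif->OUTBSYNC(drive, drive->ctl | 2, io_ports->ctl_addr);
	udelay(10);
	/* then poll the status register until BSY drops,
	 * bounded by WAIT_WORSTCASE */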
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index a4b65b321f51..862f02603f9b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,6 +264,7 @@ err_misc:
static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int use_altstatus = 0, rc;
unsigned long timeout;
u8 s = 0, a = 0;
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* take a deep breath */
msleep(50);
- if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+ if (io_ports->ctl_addr) {
a = ide_read_altstatus(drive);
s = ide_read_status(drive);
if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
*/
if ((cmd == WIN_PIDENTIFY))
/* disable dma & overlap */
- hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(0, io_ports->feature_addr);
/* ask drive for ID */
- hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(cmd, io_ports->command_addr);
timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
timeout += jiffies;
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
* interrupts during the identify-phase that
* the irq handler isn't expecting.
*/
- if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+ if (hwif->io_ports.ctl_addr) {
if (!hwif->irq) {
autoprobe = 1;
cookie = probe_irq_on();
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
do {
msleep(50);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if ((stat & BUSY_STAT) == 0)
return 0;
} while (time_before(jiffies, timeout));
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
static int do_probe (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int rc;
u8 stat;
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
msleep(50);
SELECT_DRIVE(drive);
msleep(50);
- if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all &&
+ if (hwif->INB(io_ports->device_addr) != drive->select.all &&
!drive->present) {
if (drive->select.b.unit != 0) {
/* exit with drive0 selected */
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
if (stat == (BUSY_STAT | READY_STAT))
return 4;
- if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
- ((drive->autotune == IDE_TUNE_DEFAULT) ||
- (drive->autotune == IDE_TUNE_AUTO))) {
+ if (rc == 1 && cmd == WIN_PIDENTIFY) {
printk(KERN_ERR "%s: no response (status = 0x%02x), "
"resetting drive\n", drive->name, stat);
msleep(50);
- hwif->OUTB(drive->select.all,
- hwif->io_ports[IDE_SELECT_OFFSET]);
+ hwif->OUTB(drive->select.all, io_ports->device_addr);
msleep(50);
- hwif->OUTB(WIN_SRST,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_SRST, io_ports->command_addr);
(void)ide_busy_sleep(hwif);
rc = try_to_identify(drive, cmd);
}
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
printk("%s: enabling %s -- ", hwif->name, drive->id->model);
SELECT_DRIVE(drive);
msleep(50);
- hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
if (ide_busy_sleep(hwif)) {
printk(KERN_CONT "failed (timeout)\n");
@@ -800,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (drive->present)
rc = 0;
}
- if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
- printk(KERN_WARNING "%s: reset\n", hwif->name);
- hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
- udelay(10);
- hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
- (void)ide_busy_sleep(hwif);
- }
+
local_irq_restore(flags);
+
/*
* Use cached IRQ number. It might be (and is...) changed by probe
* code above
@@ -834,12 +827,7 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
ide_drive_t *drive = &hwif->drives[unit];
if (drive->present) {
- if (drive->autotune == IDE_TUNE_AUTO)
- ide_set_max_pio(drive);
-
- if (drive->autotune != IDE_TUNE_DEFAULT &&
- drive->autotune != IDE_TUNE_AUTO)
- continue;
+ ide_set_max_pio(drive);
drive->nice1 = 1;
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
*/
static int init_irq (ide_hwif_t *hwif)
{
+ struct ide_io_ports *io_ports = &hwif->io_ports;
unsigned int index;
ide_hwgroup_t *hwgroup;
ide_hwif_t *match = NULL;
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif)
if (IDE_CHIPSET_IS_PCI(hwif->chipset))
sa = IRQF_SHARED;
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
+ if (io_ports->ctl_addr)
/* clear nIEN */
- hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(0x08, io_ports->ctl_addr);
if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
goto out_unlink;
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif)
#if !defined(__mc68000__)
printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
- hwif->io_ports[IDE_DATA_OFFSET],
- hwif->io_ports[IDE_DATA_OFFSET]+7,
- hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq);
+ io_ports->data_addr, io_ports->status_addr,
+ io_ports->ctl_addr, hwif->irq);
#else
printk("%s at 0x%08lx on irq %d", hwif->name,
- hwif->io_ports[IDE_DATA_OFFSET], hwif->irq);
+ io_ports->data_addr, hwif->irq);
#endif /* __mc68000__ */
if (match)
printk(" (%sed with %s)",
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif)
int old_irq;
if (!hwif->irq) {
- if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET])))
- {
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
printk("%s: DISABLED, NO IRQ\n", hwif->name);
return 0;
}
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif)
* It failed to initialise. Find the default IRQ for
* this port and try that.
*/
- if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) {
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
printk("%s: Disabled unable to get IRQ %d.\n",
hwif->name, old_irq);
goto out;
@@ -1336,8 +1325,6 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
drive->unmask = 1;
if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
drive->no_unmask = 1;
- if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0)
- drive->autotune = 1;
}
if (port_ops && port_ops->port_init_devs)
@@ -1518,13 +1505,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
int i, rc = 0;
for (i = 0; i < MAX_HWIFS; i++) {
- if (d == NULL || idx[i] == 0xff) {
+ if (idx[i] == 0xff) {
mate = NULL;
continue;
}
hwif = &ide_hwifs[idx[i]];
+ ide_port_apply_params(hwif);
+
+ if (d == NULL) {
+ mate = NULL;
+ continue;
+ }
+
if (d->chipset != ide_etrax100 && (i & 1) && mate) {
hwif->mate = mate;
mate->mate = hwif;
@@ -1621,6 +1615,7 @@ EXPORT_SYMBOL_GPL(ide_device_add);
void ide_port_scan(ide_hwif_t *hwif)
{
+ ide_port_apply_params(hwif);
ide_port_cable_detect(hwif);
ide_port_init_devices(hwif);
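try_to_identify() above still leans on the kernel's generic IRQ autoprobe helpers when a port has a control register but no IRQ assigned. The mechanism, sketched (the real code issues the identify command between the two calls so the device raises its line):

	unsigned long cookie = probe_irq_on();	/* arm all unassigned IRQs */

	/* ... send WIN_IDENTIFY / WIN_PIDENTIFY and wait for completion ... */

	int irq = probe_irq_off(cookie);	/* >0: exactly one IRQ fired */
	if (irq > 0)
		hwif->irq = irq;
	/* <= 0 means none or several fired; fall back to defaults */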
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index d9d98ac85b29..7b2f3815a838 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -786,14 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif)
}
}
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
-{
- create_proc_info_entry(name, 0, proc_ide_root, get_info);
-}
-EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
-#endif
-
void ide_proc_unregister_port(ide_hwif_t *hwif)
{
if (hwif->proc) {
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index d3d8b8d5157c..29870c415110 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -72,26 +72,6 @@ enum {
#endif
/**************************** Tunable parameters *****************************/
-
-
-/*
- * Pipelined mode parameters.
- *
- * We try to use the minimum number of stages which is enough to keep the tape
- * constantly streaming. To accomplish that, we implement a feedback loop around
- * the maximum number of stages:
- *
- * We start from MIN maximum stages (we will not even use MIN stages if we don't
- * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
- * pipeline is empty, until we reach the optimum value or until we reach MAX.
- *
- * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
- * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
- */
-#define IDETAPE_MIN_PIPELINE_STAGES 1
-#define IDETAPE_MAX_PIPELINE_STAGES 400
-#define IDETAPE_INCREASE_STAGES_RATE 20
-
/*
* After each failed packet command we issue a request sense command and retry
* the packet command IDETAPE_MAX_PC_RETRIES times.
@@ -224,28 +204,17 @@ enum {
/* 0 When the tape position is unknown */
IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
/* Device already opened */
- IDETAPE_FLAG_BUSY = (1 << 2),
- /* Error detected in a pipeline stage */
- IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
+ IDETAPE_FLAG_BUSY = (1 << 2),
/* Attempt to auto-detect the current user block size */
- IDETAPE_FLAG_DETECT_BS = (1 << 4),
+ IDETAPE_FLAG_DETECT_BS = (1 << 3),
/* Currently on a filemark */
- IDETAPE_FLAG_FILEMARK = (1 << 5),
+ IDETAPE_FLAG_FILEMARK = (1 << 4),
/* DRQ interrupt device */
- IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
- /* pipeline active */
- IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
+ IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
/* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
+ IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
};
-/* A pipeline stage. */
-typedef struct idetape_stage_s {
- struct request rq; /* The corresponding request */
- struct idetape_bh *bh; /* The data buffers */
- struct idetape_stage_s *next; /* Pointer to the next stage */
-} idetape_stage_t;
-
/*
* Most of our global data which we need to save even as we leave the driver due
* to an interrupt or a timer event is stored in the struct defined below.
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj {
* While polling for DSC we use postponed_rq to postpone the current
* request so that ide.c will be able to service pending requests on the
* other device. Note that at most we will have only one DSC (usually
- * data transfer) request in the device request queue. Additional
- * requests can be queued in our internal pipeline, but they will be
- * visible to ide.c only one at a time.
+ * data transfer) request in the device request queue.
*/
struct request *postponed_rq;
/* The time in which we started polling for DSC */
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj {
* At most, there is only one ide-tape originated data transfer request
* in the device request queue. This allows ide.c to easily service
* requests from the other device when we postpone our active request.
- * In the pipelined operation mode, we use our internal pipeline
- * structure to hold more data requests. The data buffer size is chosen
- * based on the tape's recommendation.
*/
- /* ptr to the request which is waiting in the device request queue */
- struct request *active_data_rq;
+
/* Data buffer size chosen based on the tape's recommendation */
- int stage_size;
- idetape_stage_t *merge_stage;
- int merge_stage_size;
+ int buffer_size;
+ /* merge buffer */
+ struct idetape_bh *merge_bh;
+ /* size of the merge buffer */
+ int merge_bh_size;
+ /* pointer to current buffer head within the merge buffer */
struct idetape_bh *bh;
char *b_data;
int b_count;
- /*
- * Pipeline parameters.
- *
- * To accomplish non-pipelined mode, we simply set the following
- * variables to zero (or NULL, where appropriate).
- */
- /* Number of currently used stages */
- int nr_stages;
- /* Number of pending stages */
- int nr_pending_stages;
- /* We will not allocate more than this number of stages */
- int max_stages, min_pipeline, max_pipeline;
- /* The first stage which will be removed from the pipeline */
- idetape_stage_t *first_stage;
- /* The currently active stage */
- idetape_stage_t *active_stage;
- /* Will be serviced after the currently active request */
- idetape_stage_t *next_stage;
- /* New requests will be added to the pipeline here */
- idetape_stage_t *last_stage;
- /* Optional free stage which we can use */
- idetape_stage_t *cache_stage;
- int pages_per_stage;
+ int pages_per_buffer;
/* Wasted space in each stage */
int excess_bh_size;
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj {
/* the tape is write protected (hardware or opened as read-only) */
char write_prot;
- /*
- * Limit the number of times a request can be postponed, to avoid an
- * infinite postpone deadlock.
- */
- int postpone_cnt;
-
- /*
- * Measures number of frames:
- *
- * 1. written/read to/from the driver pipeline (pipeline_head).
- * 2. written/read to/from the tape buffers (idetape_bh).
- * 3. written/read by the tape to/from the media (tape_head).
- */
- int pipeline_head;
- int buffer_head;
- int tape_head;
- int last_tape_head;
-
- /* Speed control at the tape buffers input/output */
- unsigned long insert_time;
- int insert_size;
- int insert_speed;
- int max_insert_speed;
- int measure_insert_time;
-
- /* Speed regulation negative feedback loop */
- int speed_control;
- int pipeline_head_speed;
- int controlled_pipeline_head_speed;
- int uncontrolled_pipeline_head_speed;
- int controlled_last_pipeline_head;
- unsigned long uncontrolled_pipeline_head_time;
- unsigned long controlled_pipeline_head_time;
- int controlled_previous_pipeline_head;
- int uncontrolled_previous_pipeline_head;
- unsigned long controlled_previous_head_time;
- unsigned long uncontrolled_previous_head_time;
- int restart_speed_control_req;
-
u32 debug_mask;
} idetape_tape_t;
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
}
}
-static void idetape_activate_next_stage(ide_drive_t *drive)
+/* Free data buffers completely. */
+static void ide_tape_kfree_buffer(idetape_tape_t *tape)
{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage = tape->next_stage;
- struct request *rq = &stage->rq;
+ struct idetape_bh *prev_bh, *bh = tape->merge_bh;
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
+ while (bh) {
+ u32 size = bh->b_size;
- if (stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
- " existing stage\n");
- return;
- }
+ while (size) {
+ unsigned int order = fls(size >> PAGE_SHIFT)-1;
- rq->rq_disk = tape->disk;
- rq->buffer = NULL;
- rq->special = (void *)stage->bh;
- tape->active_data_rq = rq;
- tape->active_stage = stage;
- tape->next_stage = stage->next;
-}
-
-/* Free a stage along with its related buffers completely. */
-static void __idetape_kfree_stage(idetape_stage_t *stage)
-{
- struct idetape_bh *prev_bh, *bh = stage->bh;
- int size;
-
- while (bh != NULL) {
- if (bh->b_data != NULL) {
- size = (int) bh->b_size;
- while (size > 0) {
- free_page((unsigned long) bh->b_data);
- size -= PAGE_SIZE;
- bh->b_data += PAGE_SIZE;
- }
+ if (bh->b_data)
+ free_pages((unsigned long)bh->b_data, order);
+
+ size &= (order-1);
+ bh->b_data += (1 << order) * PAGE_SIZE;
}
prev_bh = bh;
bh = bh->b_reqnext;
kfree(prev_bh);
}
- kfree(stage);
-}
-
-static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
-{
- __idetape_kfree_stage(stage);
-}
-
-/*
- * Remove tape->first_stage from the pipeline. The caller should avoid race
- * conditions.
- */
-static void idetape_remove_stage_head(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- if (tape->first_stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
- return;
- }
- if (tape->active_stage == tape->first_stage) {
- printk(KERN_ERR "ide-tape: bug: Trying to free our active "
- "pipeline stage\n");
- return;
- }
- stage = tape->first_stage;
- tape->first_stage = stage->next;
- idetape_kfree_stage(tape, stage);
- tape->nr_stages--;
- if (tape->first_stage == NULL) {
- tape->last_stage = NULL;
- if (tape->next_stage != NULL)
- printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
- " NULL\n");
- if (tape->nr_stages)
- printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
- "now\n");
- }
+ kfree(tape->merge_bh);
}
-/*
- * This will free all the pipeline stages starting from new_last_stage->next
- * to the end of the list, and point tape->last_stage to new_last_stage.
- */
-static void idetape_abort_pipeline(ide_drive_t *drive,
- idetape_stage_t *new_last_stage)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage = new_last_stage->next;
- idetape_stage_t *nstage;
-
- debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
-
- while (stage) {
- nstage = stage->next;
- idetape_kfree_stage(tape, stage);
- --tape->nr_stages;
- --tape->nr_pending_stages;
- stage = nstage;
- }
- if (new_last_stage)
- new_last_stage->next = NULL;
- tape->last_stage = new_last_stage;
- tape->next_stage = NULL;
-}
-
-/*
- * Finish servicing a request and insert a pending pipeline request into the
- * main device queue.
- */
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
struct request *rq = HWGROUP(drive)->rq;
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
int error;
- int remove_stage = 0;
- idetape_stage_t *active_stage;
debug_log(DBG_PROCS, "Enter %s\n", __func__);
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
spin_lock_irqsave(&tape->lock, flags);
- /* The request was a pipelined data transfer request */
- if (tape->active_data_rq == rq) {
- active_stage = tape->active_stage;
- tape->active_stage = NULL;
- tape->active_data_rq = NULL;
- tape->nr_pending_stages--;
- if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
- remove_stage = 1;
- if (error) {
- set_bit(IDETAPE_FLAG_PIPELINE_ERR,
- &tape->flags);
- if (error == IDETAPE_ERROR_EOD)
- idetape_abort_pipeline(drive,
- active_stage);
- }
- } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
- if (error == IDETAPE_ERROR_EOD) {
- set_bit(IDETAPE_FLAG_PIPELINE_ERR,
- &tape->flags);
- idetape_abort_pipeline(drive, active_stage);
- }
- }
- if (tape->next_stage != NULL) {
- idetape_activate_next_stage(drive);
-
- /* Insert the next request into the request queue. */
- (void)ide_do_drive_cmd(drive, tape->active_data_rq,
- ide_end);
- } else if (!error) {
- /*
- * This is a part of the feedback loop which tries to
- * find the optimum number of stages. We are starting
- * from a minimum maximum number of stages, and if we
- * sense that the pipeline is empty, we try to increase
- * it, until we reach the user compile time memory
- * limit.
- */
- int i = (tape->max_pipeline - tape->min_pipeline) / 10;
-
- tape->max_stages += max(i, 1);
- tape->max_stages = max(tape->max_stages,
- tape->min_pipeline);
- tape->max_stages = min(tape->max_stages,
- tape->max_pipeline);
- }
- }
ide_end_drive_cmd(drive, 0, 0);
- if (remove_stage)
- idetape_remove_stage_head(drive);
- if (tape->active_data_rq == NULL)
- clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
spin_unlock_irqrestore(&tape->lock, flags);
return 0;
}
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
/* Get the number of bytes to transfer on this interrupt. */
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
"yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
"a packet command, retrying\n");
udelay(100);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (retries == 0) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
"issuing a packet command, ignoring\n");
@@ -1292,7 +1055,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
IDETAPE_WAIT_CMD, NULL);
return ide_started;
} else {
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return idetape_transfer_pc(drive);
}
}
@@ -1335,69 +1098,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
pc->idetape_callback = &idetape_pc_callback;
}
-static void idetape_calculate_speeds(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (time_after(jiffies,
- tape->controlled_pipeline_head_time + 120 * HZ)) {
- tape->controlled_previous_pipeline_head =
- tape->controlled_last_pipeline_head;
- tape->controlled_previous_head_time =
- tape->controlled_pipeline_head_time;
- tape->controlled_last_pipeline_head = tape->pipeline_head;
- tape->controlled_pipeline_head_time = jiffies;
- }
- if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head -
- tape->controlled_last_pipeline_head) * 32 * HZ /
- (jiffies - tape->controlled_pipeline_head_time);
- else if (time_after(jiffies, tape->controlled_previous_head_time))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head -
- tape->controlled_previous_pipeline_head) * 32 *
- HZ / (jiffies - tape->controlled_previous_head_time);
-
- if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
- /* -1 for read mode error recovery */
- if (time_after(jiffies, tape->uncontrolled_previous_head_time +
- 10 * HZ)) {
- tape->uncontrolled_pipeline_head_time = jiffies;
- tape->uncontrolled_pipeline_head_speed =
- (tape->pipeline_head -
- tape->uncontrolled_previous_pipeline_head) *
- 32 * HZ / (jiffies -
- tape->uncontrolled_previous_head_time);
- }
- } else {
- tape->uncontrolled_previous_head_time = jiffies;
- tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
- if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
- 30 * HZ))
- tape->uncontrolled_pipeline_head_time = jiffies;
-
- }
- tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
- tape->controlled_pipeline_head_speed);
-
- if (tape->speed_control == 1) {
- if (tape->nr_pending_stages >= tape->max_stages / 2)
- tape->max_insert_speed = tape->pipeline_head_speed +
- (1100 - tape->pipeline_head_speed) * 2 *
- (tape->nr_pending_stages - tape->max_stages / 2)
- / tape->max_stages;
- else
- tape->max_insert_speed = 500 +
- (tape->pipeline_head_speed - 500) * 2 *
- tape->nr_pending_stages / tape->max_stages;
-
- if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
- tape->max_insert_speed = 5000;
- } else
- tape->max_insert_speed = tape->speed_control;
-
- tape->max_insert_speed = max(tape->max_insert_speed, 500);
-}
-
static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -1432,17 +1132,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
int blocks = tape->pc->xferred / tape->blk_size;
tape->avg_size += blocks * tape->blk_size;
- tape->insert_size += blocks * tape->blk_size;
- if (tape->insert_size > 1024 * 1024)
- tape->measure_insert_time = 1;
- if (tape->measure_insert_time) {
- tape->measure_insert_time = 0;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- }
- if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ /
- (jiffies - tape->insert_time);
+
if (time_after_eq(jiffies, tape->avg_time + HZ)) {
tape->avg_speed = tape->avg_size * HZ /
(jiffies - tape->avg_time) / 1024;
@@ -1475,7 +1165,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape,
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
- if (pc->req_xfer == tape->stage_size)
+ if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_RECOMMENDED;
}
@@ -1495,7 +1185,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
- if (pc->req_xfer == tape->stage_size)
+ if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_RECOMMENDED;
}
@@ -1547,10 +1237,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
drive->post_reset = 0;
}
- if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ /
- (jiffies - tape->insert_time);
- idetape_calculate_speeds(drive);
if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
(stat & SEEK_STAT) == 0) {
if (postponed_rq == NULL) {
@@ -1574,16 +1260,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
return ide_stopped;
}
if (rq->cmd[0] & REQ_IDETAPE_READ) {
- tape->buffer_head++;
- tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
(struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
- tape->buffer_head++;
- tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
(struct idetape_bh *)rq->special);
@@ -1604,103 +1286,91 @@ out:
return idetape_issue_pc(drive, pc);
}
-/* Pipeline related functions */
-
/*
- * The function below uses __get_free_page to allocate a pipeline stage, along
- * with all the necessary small buffers which together make a buffer of size
- * tape->stage_size (or a bit more). We attempt to combine sequential pages as
+ * The function below uses __get_free_pages to allocate a data buffer of size
+ * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
* much as possible.
*
- * It returns a pointer to the new allocated stage, or NULL if we can't (or
- * don't want to) allocate a stage.
- *
- * Pipeline stages are optional and are used to increase performance. If we
- * can't allocate them, we'll manage without them.
+ * It returns a pointer to the newly allocated buffer, or NULL in case of
+ * failure.
*/
-static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
- int clear)
+static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
+ int full, int clear)
{
- idetape_stage_t *stage;
- struct idetape_bh *prev_bh, *bh;
- int pages = tape->pages_per_stage;
+ struct idetape_bh *prev_bh, *bh, *merge_bh;
+ int pages = tape->pages_per_buffer;
+ unsigned int order, b_allocd;
char *b_data = NULL;
- stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
- if (!stage)
- return NULL;
- stage->next = NULL;
-
- stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- bh = stage->bh;
+ merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ bh = merge_bh;
if (bh == NULL)
goto abort;
- bh->b_reqnext = NULL;
- bh->b_data = (char *) __get_free_page(GFP_KERNEL);
+
+ order = fls(pages) - 1;
+ bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!bh->b_data)
goto abort;
+ b_allocd = (1 << order) * PAGE_SIZE;
+ pages &= (order-1);
+
if (clear)
- memset(bh->b_data, 0, PAGE_SIZE);
- bh->b_size = PAGE_SIZE;
+ memset(bh->b_data, 0, b_allocd);
+ bh->b_reqnext = NULL;
+ bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0);
- while (--pages) {
- b_data = (char *) __get_free_page(GFP_KERNEL);
+ while (pages) {
+ order = fls(pages) - 1;
+ b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!b_data)
goto abort;
+ b_allocd = (1 << order) * PAGE_SIZE;
+
if (clear)
- memset(b_data, 0, PAGE_SIZE);
- if (bh->b_data == b_data + PAGE_SIZE) {
- bh->b_size += PAGE_SIZE;
- bh->b_data -= PAGE_SIZE;
+ memset(b_data, 0, b_allocd);
+
+ /* newly allocated page frames below buffer header or ...*/
+ if (bh->b_data == b_data + b_allocd) {
+ bh->b_size += b_allocd;
+ bh->b_data -= b_allocd;
if (full)
- atomic_add(PAGE_SIZE, &bh->b_count);
+ atomic_add(b_allocd, &bh->b_count);
continue;
}
+ /* they are above the header */
if (b_data == bh->b_data + bh->b_size) {
- bh->b_size += PAGE_SIZE;
+ bh->b_size += b_allocd;
if (full)
- atomic_add(PAGE_SIZE, &bh->b_count);
+ atomic_add(b_allocd, &bh->b_count);
continue;
}
prev_bh = bh;
bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
if (!bh) {
- free_page((unsigned long) b_data);
+ free_pages((unsigned long) b_data, order);
goto abort;
}
bh->b_reqnext = NULL;
bh->b_data = b_data;
- bh->b_size = PAGE_SIZE;
+ bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0);
prev_bh->b_reqnext = bh;
+
+ pages &= (order-1);
}
+
bh->b_size -= tape->excess_bh_size;
if (full)
atomic_sub(tape->excess_bh_size, &bh->b_count);
- return stage;
+ return merge_bh;
abort:
- __idetape_kfree_stage(stage);
+ ide_tape_kfree_buffer(tape);
return NULL;
}
-static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
-{
- idetape_stage_t *cache_stage = tape->cache_stage;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- if (tape->nr_stages >= tape->max_stages)
- return NULL;
- if (cache_stage != NULL) {
- tape->cache_stage = NULL;
- return cache_stage;
- }
- return __idetape_kmalloc_stage(tape, 0, 0);
-}
-
static int idetape_copy_stage_from_user(idetape_tape_t *tape,
- idetape_stage_t *stage, const char __user *buf, int n)
+ const char __user *buf, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
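The allocation rewrite above replaces page-at-a-time __get_free_page() calls with __get_free_pages() at the largest order that still fits the remaining page count, so physically contiguous memory collapses into fewer idetape_bh segments. The core idea, simplified (a sketch: remainder tracked by plain subtraction, chunk merging and bookkeeping elided):

	/* build a buffer of "pages" pages out of maximal power-of-two chunks */
	while (pages > 0) {
		unsigned int order = fls(pages) - 1;	/* largest 2^order <= pages */
		char *p = (char *)__get_free_pages(GFP_KERNEL, order);

		if (!p)
			break;		/* caller frees what was chained so far */

		/* chain a segment of (1 << order) * PAGE_SIZE bytes at p ... */
		pages -= 1 << order;
	}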
@@ -1732,7 +1402,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape,
}
static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
- idetape_stage_t *stage, int n)
+ int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1763,11 +1433,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
return ret;
}
-static void idetape_init_merge_stage(idetape_tape_t *tape)
+static void idetape_init_merge_buffer(idetape_tape_t *tape)
{
- struct idetape_bh *bh = tape->merge_stage->bh;
+ struct idetape_bh *bh = tape->merge_bh;
+ tape->bh = tape->merge_bh;
- tape->bh = bh;
if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
atomic_set(&bh->b_count, 0);
else {
@@ -1776,61 +1446,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape)
}
}
-static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
-{
- struct idetape_bh *tmp;
-
- tmp = stage->bh;
- stage->bh = tape->merge_stage->bh;
- tape->merge_stage->bh = tmp;
- idetape_init_merge_stage(tape);
-}
-
-/* Add a new stage at the end of the pipeline. */
-static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- spin_lock_irqsave(&tape->lock, flags);
- stage->next = NULL;
- if (tape->last_stage != NULL)
- tape->last_stage->next = stage;
- else
- tape->first_stage = stage;
- tape->next_stage = stage;
- tape->last_stage = stage;
- if (tape->next_stage == NULL)
- tape->next_stage = tape->last_stage;
- tape->nr_stages++;
- tape->nr_pending_stages++;
- spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/* Install a completion in a pending request and sleep until it is serviced. The
- * caller should ensure that the request will not be serviced before we install
- * the completion (usually by disabling interrupts).
- */
-static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- idetape_tape_t *tape = drive->driver_data;
-
- if (rq == NULL || !blk_special_request(rq)) {
- printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
- " request\n");
- return;
- }
- rq->end_io_data = &wait;
- rq->end_io = blk_end_sync_rq;
- spin_unlock_irq(&tape->lock);
- wait_for_completion(&wait);
- /* The stage and its struct request have been deallocated */
- spin_lock_irq(&tape->lock);
-}
-
static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -1899,7 +1514,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
* to the request list without waiting for it to be serviced! In that case, we
* usually use idetape_queue_pc_head().
*/
-static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
struct ide_tape_obj *tape = drive->driver_data;
struct request rq;
@@ -1931,7 +1546,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
timeout += jiffies;
while (time_before(jiffies, timeout)) {
idetape_create_test_unit_ready_cmd(&pc);
- if (!__idetape_queue_pc_tail(drive, &pc))
+ if (!idetape_queue_pc_tail(drive, &pc))
return 0;
if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
|| (tape->asc == 0x3A)) {
@@ -1940,7 +1555,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -ENOMEDIUM;
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_LOAD_MASK);
- __idetape_queue_pc_tail(drive, &pc);
+ idetape_queue_pc_tail(drive, &pc);
load_attempted = 1;
/* not about to be ready */
} else if (!(tape->sense_key == 2 && tape->asc == 4 &&
@@ -1951,11 +1566,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -EIO;
}
-static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- return __idetape_queue_pc_tail(drive, pc);
-}
-
static int idetape_flush_tape_buffers(ide_drive_t *drive)
{
struct ide_atapi_pc pc;
@@ -2021,50 +1631,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive,
return 1;
}
-static int __idetape_discard_read_pipeline(ide_drive_t *drive)
+static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
- int cnt;
if (tape->chrdev_dir != IDETAPE_DIR_READ)
- return 0;
+ return;
- /* Remove merge stage. */
- cnt = tape->merge_stage_size / tape->blk_size;
- if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
- ++cnt; /* Filemarks count as 1 sector */
- tape->merge_stage_size = 0;
- if (tape->merge_stage != NULL) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
+ tape->merge_bh_size = 0;
+ if (tape->merge_bh != NULL) {
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
- /* Clear pipeline flags. */
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- /* Remove pipeline stages. */
- if (tape->first_stage == NULL)
- return 0;
-
- spin_lock_irqsave(&tape->lock, flags);
- tape->next_stage = NULL;
- if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
-
- while (tape->first_stage != NULL) {
- struct request *rq_ptr = &tape->first_stage->rq;
-
- cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
- if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
- ++cnt;
- idetape_remove_stage_head(drive);
- }
- tape->nr_pending_stages = 0;
- tape->max_stages = tape->min_pipeline;
- return cnt;
}
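
With the pipeline gone, discarding buffered read data reduces to the three
steps above: drop the filemark flag, free the merge buffer, and reset the
transfer direction. A compact sketch of that state transition, with a
hypothetical tape_state struct standing in for idetape_tape_t:

	#include <stdlib.h>

	enum dir { DIR_NONE, DIR_READ, DIR_WRITE };

	struct tape_state {
		enum dir chrdev_dir;
		int	 filemark;	/* models IDETAPE_FLAG_FILEMARK */
		void	*merge_buf;	/* models tape->merge_bh */
		size_t	 merge_size;	/* models tape->merge_bh_size */
	};

	static void discard_merge_buffer(struct tape_state *t)
	{
		if (t->chrdev_dir != DIR_READ)
			return;

		t->filemark = 0;
		t->merge_size = 0;
		if (t->merge_buf) {
			free(t->merge_buf);
			t->merge_buf = NULL;
		}
		t->chrdev_dir = DIR_NONE;
	}

	int main(void)
	{
		struct tape_state t = { DIR_READ, 1, malloc(64), 64 };

		discard_merge_buffer(&t);
		return t.chrdev_dir == DIR_NONE ? 0 : 1;
	}
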
/*
@@ -2081,7 +1662,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
struct ide_atapi_pc pc;
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- __idetape_discard_read_pipeline(drive);
+ __ide_tape_discard_merge_buffer(drive);
idetape_wait_ready(drive, 60 * 5 * HZ);
idetape_create_locate_cmd(drive, &pc, block, partition, skip);
retval = idetape_queue_pc_tail(drive, &pc);
@@ -2092,20 +1673,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
return (idetape_queue_pc_tail(drive, &pc));
}
-static void idetape_discard_read_pipeline(ide_drive_t *drive,
+static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
int restore_position)
{
idetape_tape_t *tape = drive->driver_data;
- int cnt;
int seek, position;
- cnt = __idetape_discard_read_pipeline(drive);
+ __ide_tape_discard_merge_buffer(drive);
if (restore_position) {
position = idetape_read_position(drive);
- seek = position > cnt ? position - cnt : 0;
+ seek = position > 0 ? position : 0;
if (idetape_position_tape(drive, seek, 0, 0)) {
printk(KERN_INFO "ide-tape: %s: position_tape failed in"
- " discard_pipeline()\n", tape->name);
+ " %s\n", tape->name, __func__);
return;
}
}
@@ -2123,12 +1703,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
- if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
- __func__);
- return (0);
- }
-
idetape_init_rq(&rq, cmd);
rq.rq_disk = tape->disk;
rq.special = (void *)bh;
@@ -2140,26 +1714,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
return 0;
- if (tape->merge_stage)
- idetape_init_merge_stage(tape);
+ if (tape->merge_bh)
+ idetape_init_merge_buffer(tape);
if (rq.errors == IDETAPE_ERROR_GENERAL)
return -EIO;
return (tape->blk_size * (blocks-rq.current_nr_sectors));
}
-/* start servicing the pipeline stages, starting from tape->next_stage. */
-static void idetape_plug_pipeline(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->next_stage == NULL)
- return;
- if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- idetape_activate_next_stage(drive);
- (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
- }
-}
-
static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
{
idetape_init_pc(pc);
@@ -2197,137 +1758,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
pc->idetape_callback = &idetape_pc_callback;
}
-static void idetape_wait_first_stage(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- if (tape->first_stage == NULL)
- return;
- spin_lock_irqsave(&tape->lock, flags);
- if (tape->active_stage == tape->first_stage)
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character device originated write request. */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *new_stage;
- unsigned long flags;
- struct request *rq;
debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
- /* Attempt to allocate a new stage. Beware possible race conditions. */
- while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
- spin_lock_irqsave(&tape->lock, flags);
- if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
- } else {
- spin_unlock_irqrestore(&tape->lock, flags);
- idetape_plug_pipeline(drive);
- if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
- &tape->flags))
- continue;
- /*
- * The machine is short on memory. Fallback to non-
- * pipelined operation mode for this request.
- */
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
- blocks, tape->merge_stage->bh);
- }
- }
- rq = &new_stage->rq;
- idetape_init_rq(rq, REQ_IDETAPE_WRITE);
- /* Doesn't actually matter - We always assume sequential access */
- rq->sector = tape->first_frame;
- rq->current_nr_sectors = blocks;
- rq->nr_sectors = blocks;
-
- idetape_switch_buffers(tape, new_stage);
- idetape_add_stage_tail(drive, new_stage);
- tape->pipeline_head++;
- idetape_calculate_speeds(drive);
-
- /*
- * Estimate whether the tape has stopped writing by checking if our
- * write pipeline is currently empty. If we are not writing anymore,
- * wait for the pipeline to be almost completely full (90%) before
- * starting to service requests, so that we will be able to keep up with
- * the higher speeds of the tape.
- */
- if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
- tape->nr_stages >= tape->max_stages -
- tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
- tape->blk_size) {
- tape->measure_insert_time = 1;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- tape->insert_speed = 0;
- idetape_plug_pipeline(drive);
- }
- }
- if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
- /* Return a deferred error */
- return -EIO;
- return blocks;
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ blocks, tape->merge_bh);
}
-/*
- * Wait until all pending pipeline requests are serviced. Typically called on
- * device close.
- */
-static void idetape_wait_for_pipeline(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
- &tape->flags)) {
- idetape_plug_pipeline(drive);
- spin_lock_irqsave(&tape->lock, flags);
- if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
- }
-}
-
-static void idetape_empty_write_pipeline(ide_drive_t *drive)
+static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
int blocks, min;
struct idetape_bh *bh;
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
+ printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
" but we are not writing.\n");
return;
}
- if (tape->merge_stage_size > tape->stage_size) {
+ if (tape->merge_bh_size > tape->buffer_size) {
printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
- tape->merge_stage_size = tape->stage_size;
+ tape->merge_bh_size = tape->buffer_size;
}
- if (tape->merge_stage_size) {
- blocks = tape->merge_stage_size / tape->blk_size;
- if (tape->merge_stage_size % tape->blk_size) {
+ if (tape->merge_bh_size) {
+ blocks = tape->merge_bh_size / tape->blk_size;
+ if (tape->merge_bh_size % tape->blk_size) {
unsigned int i;
blocks++;
- i = tape->blk_size - tape->merge_stage_size %
+ i = tape->blk_size - tape->merge_bh_size %
tape->blk_size;
bh = tape->bh->b_reqnext;
while (bh) {
@@ -2351,74 +1814,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive)
}
}
(void) idetape_add_chrdev_write_request(drive, blocks);
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- idetape_wait_for_pipeline(drive);
- if (tape->merge_stage != NULL) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ if (tape->merge_bh != NULL) {
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- /*
- * On the next backup, perform the feedback loop again. (I don't want to
- * keep sense information between backups, as some systems are
- * constantly on, and the system load can be totally different on the
- * next backup).
- */
- tape->max_stages = tape->min_pipeline;
- if (tape->first_stage != NULL ||
- tape->next_stage != NULL ||
- tape->last_stage != NULL ||
- tape->nr_stages != 0) {
- printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
- "first_stage %p, next_stage %p, "
- "last_stage %p, nr_stages %d\n",
- tape->first_stage, tape->next_stage,
- tape->last_stage, tape->nr_stages);
- }
}
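
When the merge buffer holds a partial trailing block, the flush path above
rounds the block count up and zero-fills the gap before queueing the final
write. The rounding arithmetic in isolation, with illustrative numbers:

	/* Round a partial merge buffer up to whole blocks and compute the
	 * pad bytes to zero, as ide_tape_flush_merge_buffer() does. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int blk_size = 512, merge_size = 1300;
		unsigned int blocks = merge_size / blk_size;
		unsigned int pad = 0;

		if (merge_size % blk_size) {
			blocks++;			/* 3 blocks, not 2 */
			pad = blk_size - merge_size % blk_size;	/* 236 */
		}
		printf("%u blocks, %u pad bytes\n", blocks, pad);
		return 0;
	}
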
-static void idetape_restart_speed_control(ide_drive_t *drive)
+static int idetape_init_read(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-
- tape->restart_speed_control_req = 0;
- tape->pipeline_head = 0;
- tape->controlled_last_pipeline_head = 0;
- tape->controlled_previous_pipeline_head = 0;
- tape->uncontrolled_previous_pipeline_head = 0;
- tape->controlled_pipeline_head_speed = 5000;
- tape->pipeline_head_speed = 5000;
- tape->uncontrolled_pipeline_head_speed = 0;
- tape->controlled_pipeline_head_time =
- tape->uncontrolled_pipeline_head_time = jiffies;
- tape->controlled_previous_head_time =
- tape->uncontrolled_previous_head_time = jiffies;
-}
-
-static int idetape_init_read(ide_drive_t *drive, int max_stages)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *new_stage;
- struct request rq;
int bytes_read;
- u16 blocks = *(u16 *)&tape->caps[12];
/* Initialize read operation */
if (tape->chrdev_dir != IDETAPE_DIR_READ) {
if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- idetape_empty_write_pipeline(drive);
+ ide_tape_flush_merge_buffer(drive);
idetape_flush_tape_buffers(drive);
}
- if (tape->merge_stage || tape->merge_stage_size) {
- printk(KERN_ERR "ide-tape: merge_stage_size should be"
+ if (tape->merge_bh || tape->merge_bh_size) {
+ printk(KERN_ERR "ide-tape: merge_bh_size should be"
" 0 now\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
- if (!tape->merge_stage)
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+ if (!tape->merge_bh)
return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_READ;
@@ -2431,54 +1853,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages)
if (drive->dsc_overlap) {
bytes_read = idetape_queue_rw_tail(drive,
REQ_IDETAPE_READ, 0,
- tape->merge_stage->bh);
+ tape->merge_bh);
if (bytes_read < 0) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
tape->chrdev_dir = IDETAPE_DIR_NONE;
return bytes_read;
}
}
}
- if (tape->restart_speed_control_req)
- idetape_restart_speed_control(drive);
- idetape_init_rq(&rq, REQ_IDETAPE_READ);
- rq.sector = tape->first_frame;
- rq.nr_sectors = blocks;
- rq.current_nr_sectors = blocks;
- if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
- tape->nr_stages < max_stages) {
- new_stage = idetape_kmalloc_stage(tape);
- while (new_stage != NULL) {
- new_stage->rq = rq;
- idetape_add_stage_tail(drive, new_stage);
- if (tape->nr_stages >= max_stages)
- break;
- new_stage = idetape_kmalloc_stage(tape);
- }
- }
- if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
- if (tape->nr_pending_stages >= 3 * max_stages / 4) {
- tape->measure_insert_time = 1;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- tape->insert_speed = 0;
- idetape_plug_pipeline(drive);
- }
- }
+
return 0;
}
-/*
- * Called from idetape_chrdev_read() to service a character device read request
- * and add read-ahead requests to our pipeline.
- */
+/* called from idetape_chrdev_read() to service a chrdev read request. */
static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
- struct request *rq_ptr;
- int bytes_read;
debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
@@ -2486,39 +1877,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
return 0;
- /* Wait for the next block to reach the head of the pipeline. */
- idetape_init_read(drive, tape->max_stages);
- if (tape->first_stage == NULL) {
- if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
- return 0;
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
- tape->merge_stage->bh);
- }
- idetape_wait_first_stage(drive);
- rq_ptr = &tape->first_stage->rq;
- bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
- rq_ptr->current_nr_sectors);
- rq_ptr->nr_sectors = 0;
- rq_ptr->current_nr_sectors = 0;
+ idetape_init_read(drive);
- if (rq_ptr->errors == IDETAPE_ERROR_EOD)
- return 0;
- else {
- idetape_switch_buffers(tape, tape->first_stage);
- if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
- set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
- spin_lock_irqsave(&tape->lock, flags);
- idetape_remove_stage_head(drive);
- spin_unlock_irqrestore(&tape->lock, flags);
- tape->pipeline_head++;
- idetape_calculate_speeds(drive);
- }
- if (bytes_read > blocks * tape->blk_size) {
- printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
- " than requested\n");
- bytes_read = blocks * tape->blk_size;
- }
- return (bytes_read);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
+ tape->merge_bh);
}
static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
@@ -2530,8 +1892,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
while (bcount) {
unsigned int count;
- bh = tape->merge_stage->bh;
- count = min(tape->stage_size, bcount);
+ bh = tape->merge_bh;
+ count = min(tape->buffer_size, bcount);
bcount -= count;
blocks = count / tape->blk_size;
while (count) {
@@ -2542,29 +1904,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
bh = bh->b_reqnext;
}
idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
- tape->merge_stage->bh);
- }
-}
-
-static int idetape_pipeline_size(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage;
- struct request *rq;
- int size = 0;
-
- idetape_wait_for_pipeline(drive);
- stage = tape->first_stage;
- while (stage != NULL) {
- rq = &stage->rq;
- size += tape->blk_size * (rq->nr_sectors -
- rq->current_nr_sectors);
- if (rq->errors == IDETAPE_ERROR_FILEMARK)
- size += tape->blk_size;
- stage = stage->next;
+ tape->merge_bh);
}
- size += tape->merge_stage_size;
- return size;
}
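
idetape_pad_zeros() walks the pad length in buffer-sized chunks, zeroing
each chunk and queueing it as whole blocks. The same chunking loop over a
plain byte buffer (bcount is a whole number of blocks in the driver):

	#include <stdio.h>
	#include <string.h>

	#define BUFFER_SIZE 4096u

	int main(void)
	{
		unsigned char buf[BUFFER_SIZE];
		unsigned int blk_size = 512, bcount = 10240;

		while (bcount) {
			unsigned int count =
				bcount < BUFFER_SIZE ? bcount : BUFFER_SIZE;

			memset(buf, 0, count);	/* zero one chunk */
			bcount -= count;
			printf("write %u blocks of zeros\n",
			       count / blk_size);
		}
		return 0;
	}
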
/*
@@ -2612,11 +1953,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
if (copy_from_user(&config, argp, sizeof(config)))
return -EFAULT;
tape->best_dsc_rw_freq = config.dsc_rw_frequency;
- tape->max_stages = config.nr_stages;
break;
case 0x0350:
config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
- config.nr_stages = tape->max_stages;
+ config.nr_stages = 1;
if (copy_to_user(argp, &config, sizeof(config)))
return -EFAULT;
break;
@@ -2626,19 +1966,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
return 0;
}
-/*
- * The function below is now a bit more complicated than just passing the
- * command to the tape since we may have crossed some filemarks during our
- * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
- * support MTFSFM when the filemark is in our internal pipeline even if the tape
- * doesn't support spacing over filemarks in the reverse direction.
- */
static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc pc;
- unsigned long flags;
int retval, count = 0;
int sprev = !!(tape->caps[4] & 0x20);
@@ -2651,48 +1983,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
}
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- /* its a read-ahead buffer, scan it for crossed filemarks. */
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
++count;
- while (tape->first_stage != NULL) {
- if (count == mt_count) {
- if (mt_op == MTFSFM)
- set_bit(IDETAPE_FLAG_FILEMARK,
- &tape->flags);
- return 0;
- }
- spin_lock_irqsave(&tape->lock, flags);
- if (tape->first_stage == tape->active_stage) {
- /*
- * We have reached the active stage in the read
- * pipeline. There is no point in allowing the
- * drive to continue reading any farther, so we
- * stop the pipeline.
- *
- * This section should be moved to a separate
- * subroutine because similar operations are
- * done in __idetape_discard_read_pipeline(),
- * for example.
- */
- tape->next_stage = NULL;
- spin_unlock_irqrestore(&tape->lock, flags);
- idetape_wait_first_stage(drive);
- tape->next_stage = tape->first_stage->next;
- } else
- spin_unlock_irqrestore(&tape->lock, flags);
- if (tape->first_stage->rq.errors ==
- IDETAPE_ERROR_FILEMARK)
- ++count;
- idetape_remove_stage_head(drive);
- }
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
}
- /*
- * The filemark was not found in our internal pipeline; now we can issue
- * the space command.
- */
switch (mt_op) {
case MTFSF:
case MTBSF:
@@ -2748,27 +2044,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
(count % tape->blk_size) == 0)
tape->user_bs_factor = count / tape->blk_size;
}
- rc = idetape_init_read(drive, tape->max_stages);
+ rc = idetape_init_read(drive);
if (rc < 0)
return rc;
if (count == 0)
return (0);
- if (tape->merge_stage_size) {
- actually_read = min((unsigned int)(tape->merge_stage_size),
+ if (tape->merge_bh_size) {
+ actually_read = min((unsigned int)(tape->merge_bh_size),
(unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- actually_read))
+ if (idetape_copy_stage_to_user(tape, buf, actually_read))
ret = -EFAULT;
buf += actually_read;
- tape->merge_stage_size -= actually_read;
+ tape->merge_bh_size -= actually_read;
count -= actually_read;
}
- while (count >= tape->stage_size) {
+ while (count >= tape->buffer_size) {
bytes_read = idetape_add_chrdev_read_request(drive, ctl);
if (bytes_read <= 0)
goto finish;
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- bytes_read))
+ if (idetape_copy_stage_to_user(tape, buf, bytes_read))
ret = -EFAULT;
buf += bytes_read;
count -= bytes_read;
@@ -2779,11 +2073,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
if (bytes_read <= 0)
goto finish;
temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- temp))
+ if (idetape_copy_stage_to_user(tape, buf, temp))
ret = -EFAULT;
actually_read += temp;
- tape->merge_stage_size = bytes_read-temp;
+ tape->merge_bh_size = bytes_read-temp;
}
finish:
if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
@@ -2814,17 +2107,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
/* Initialize write operation */
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- idetape_discard_read_pipeline(drive, 1);
- if (tape->merge_stage || tape->merge_stage_size) {
- printk(KERN_ERR "ide-tape: merge_stage_size "
+ ide_tape_discard_merge_buffer(drive, 1);
+ if (tape->merge_bh || tape->merge_bh_size) {
+ printk(KERN_ERR "ide-tape: merge_bh_size "
"should be 0 now\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
- if (!tape->merge_stage)
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+ if (!tape->merge_bh)
return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_WRITE;
- idetape_init_merge_stage(tape);
+ idetape_init_merge_buffer(tape);
/*
* Issue a write 0 command to ensure that DSC handshake is
@@ -2835,10 +2128,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
if (drive->dsc_overlap) {
ssize_t retval = idetape_queue_rw_tail(drive,
REQ_IDETAPE_WRITE, 0,
- tape->merge_stage->bh);
+ tape->merge_bh);
if (retval < 0) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
tape->chrdev_dir = IDETAPE_DIR_NONE;
return retval;
}
@@ -2846,49 +2139,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
}
if (count == 0)
return (0);
- if (tape->restart_speed_control_req)
- idetape_restart_speed_control(drive);
- if (tape->merge_stage_size) {
- if (tape->merge_stage_size >= tape->stage_size) {
+ if (tape->merge_bh_size) {
+ if (tape->merge_bh_size >= tape->buffer_size) {
printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
actually_written = min((unsigned int)
- (tape->stage_size - tape->merge_stage_size),
+ (tape->buffer_size - tape->merge_bh_size),
(unsigned int)count);
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- actually_written))
+ if (idetape_copy_stage_from_user(tape, buf, actually_written))
ret = -EFAULT;
buf += actually_written;
- tape->merge_stage_size += actually_written;
+ tape->merge_bh_size += actually_written;
count -= actually_written;
- if (tape->merge_stage_size == tape->stage_size) {
+ if (tape->merge_bh_size == tape->buffer_size) {
ssize_t retval;
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
retval = idetape_add_chrdev_write_request(drive, ctl);
if (retval <= 0)
return (retval);
}
}
- while (count >= tape->stage_size) {
+ while (count >= tape->buffer_size) {
ssize_t retval;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- tape->stage_size))
+ if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
ret = -EFAULT;
- buf += tape->stage_size;
- count -= tape->stage_size;
+ buf += tape->buffer_size;
+ count -= tape->buffer_size;
retval = idetape_add_chrdev_write_request(drive, ctl);
- actually_written += tape->stage_size;
+ actually_written += tape->buffer_size;
if (retval <= 0)
return (retval);
}
if (count) {
actually_written += count;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- count))
+ if (idetape_copy_stage_from_user(tape, buf, count))
ret = -EFAULT;
- tape->merge_stage_size += count;
+ tape->merge_bh_size += count;
}
return ret ? ret : actually_written;
}
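
The write path above follows a classic buffering scheme: top off a
partially filled merge buffer, flush it once full, stream whole
buffer-sized chunks directly, and stash any tail for the next call. A
self-contained sketch, with flush() as a hypothetical stand-in for
idetape_add_chrdev_write_request() and error handling omitted:

	#include <stddef.h>
	#include <string.h>

	#define BUF_SIZE 4096

	static char merge[BUF_SIZE];
	static size_t merge_size;

	static void flush(void) { merge_size = 0; /* would queue a write */ }

	static size_t tape_write(const char *src, size_t count)
	{
		size_t written = 0;

		if (merge_size) {		/* top off the buffer */
			size_t n = BUF_SIZE - merge_size;
			if (n > count)
				n = count;
			memcpy(merge + merge_size, src, n);
			merge_size += n;
			src += n; count -= n; written += n;
			if (merge_size == BUF_SIZE)
				flush();
		}
		while (count >= BUF_SIZE) {	/* whole chunks */
			memcpy(merge, src, BUF_SIZE);
			flush();
			src += BUF_SIZE; count -= BUF_SIZE;
			written += BUF_SIZE;
		}
		if (count) {			/* stash the tail */
			memcpy(merge, src, count);
			merge_size = count;
			written += count;
		}
		return written;
	}

	int main(void)
	{
		char data[10000] = { 0 };
		return tape_write(data, sizeof(data)) == sizeof(data) ? 0 : 1;
	}
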
@@ -2912,8 +2200,7 @@ static int idetape_write_filemark(ide_drive_t *drive)
*
* Note: MTBSF and MTBSFM are not supported when the tape doesn't support
* spacing over filemarks in the reverse direction. In this case, MTFSFM is also
- * usually not supported (it is supported in the rare case in which we crossed
- * the filemark during our read-ahead pipelined operation mode).
+ * usually not supported.
*
* The following commands are currently not supported:
*
@@ -2929,7 +2216,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
mt_op, mt_count);
- /* Commands which need our pipelined read-ahead stages. */
switch (mt_op) {
case MTFSF:
case MTFSFM:
@@ -2946,7 +2232,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
case MTWEOF:
if (tape->write_prot)
return -EACCES;
- idetape_discard_read_pipeline(drive, 1);
+ ide_tape_discard_merge_buffer(drive, 1);
for (i = 0; i < mt_count; i++) {
retval = idetape_write_filemark(drive);
if (retval)
@@ -2954,12 +2240,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
}
return 0;
case MTREW:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
if (idetape_rewind_tape(drive))
return -EIO;
return 0;
case MTLOAD:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_LOAD_MASK);
return idetape_queue_pc_tail(drive, &pc);
@@ -2974,7 +2260,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
if (!idetape_queue_pc_tail(drive, &pc))
tape->door_locked = DOOR_UNLOCKED;
}
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
!IDETAPE_LU_LOAD_MASK);
retval = idetape_queue_pc_tail(drive, &pc);
@@ -2982,10 +2268,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
return retval;
case MTNOP:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_flush_tape_buffers(drive);
case MTRETEN:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
return idetape_queue_pc_tail(drive, &pc);
@@ -3007,11 +2293,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
return 0;
case MTSEEK:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_position_tape(drive,
mt_count * tape->user_bs_factor, tape->partition, 0);
case MTSETPART:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_position_tape(drive, 0, mt_count, 0);
case MTFSR:
case MTBSR:
@@ -3056,13 +2342,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
- tape->restart_speed_control_req = 1;
if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- idetape_empty_write_pipeline(drive);
+ ide_tape_flush_merge_buffer(drive);
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = idetape_pipeline_size(drive) /
+ block_offset = tape->merge_bh_size /
(tape->blk_size * tape->user_bs_factor);
position = idetape_read_position(drive);
if (position < 0)
@@ -3094,7 +2379,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
return 0;
default:
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- idetape_discard_read_pipeline(drive, 1);
+ ide_tape_discard_merge_buffer(drive, 1);
return idetape_blkdev_ioctl(drive, cmd, arg);
}
}
@@ -3168,9 +2453,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
(void)idetape_rewind_tape(drive);
- if (tape->chrdev_dir != IDETAPE_DIR_READ)
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
-
/* Read block size and write protect status from drive. */
ide_tape_get_bsize_from_bdesc(drive);
@@ -3199,8 +2481,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
}
}
}
- idetape_restart_speed_control(drive);
- tape->restart_speed_control_req = 0;
return 0;
out_put_tape:
@@ -3212,13 +2492,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_empty_write_pipeline(drive);
- tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
- if (tape->merge_stage != NULL) {
+ ide_tape_flush_merge_buffer(drive);
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
+ if (tape->merge_bh != NULL) {
idetape_pad_zeros(drive, tape->blk_size *
(tape->user_bs_factor - 1));
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
idetape_write_filemark(drive);
idetape_flush_tape_buffers(drive);
@@ -3241,14 +2521,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
idetape_write_release(drive, minor);
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
if (minor < 128)
- idetape_discard_read_pipeline(drive, 1);
- else
- idetape_wait_for_pipeline(drive);
- }
- if (tape->cache_stage != NULL) {
- __idetape_kfree_stage(tape->cache_stage);
- tape->cache_stage = NULL;
+ ide_tape_discard_merge_buffer(drive, 1);
}
+
if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
(void) idetape_rewind_tape(drive);
if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
@@ -3385,33 +2660,15 @@ static void idetape_add_settings(ide_drive_t *drive)
ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 2, (u16 *)&tape->caps[16], NULL);
- ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
- ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->max_stages, NULL);
- ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
- ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
- 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
- NULL);
- ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
- 0xffff, tape->stage_size / 1024, 1,
- &tape->nr_pending_stages, NULL);
ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 1, (u16 *)&tape->caps[14], NULL);
- ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
- 1024, &tape->stage_size, NULL);
+ ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff,
+ 1, 1024, &tape->buffer_size, NULL);
ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
NULL);
ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
1, &drive->dsc_overlap, NULL);
- ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
- 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
- NULL);
- ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
- 0, 0xffff, 1, 1,
- &tape->uncontrolled_pipeline_head_speed, NULL);
ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
1, 1, &tape->avg_speed, NULL);
ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
@@ -3434,11 +2691,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; }
*/
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
- unsigned long t1, tmid, tn, t;
+ unsigned long t;
int speed;
- int stage_size;
+ int buffer_size;
u8 gcw[2];
- struct sysinfo si;
u16 *ctl = (u16 *)&tape->caps[12];
spin_lock_init(&tape->lock);
@@ -3457,65 +2713,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->name[2] = '0' + minor;
tape->chrdev_dir = IDETAPE_DIR_NONE;
tape->pc = tape->pc_stack;
- tape->max_insert_speed = 10000;
- tape->speed_control = 1;
*((unsigned short *) &gcw) = drive->id->config;
/* Command packet DRQ type */
if (((gcw[0] & 0x60) >> 5) == 1)
set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
- tape->min_pipeline = 10;
- tape->max_pipeline = 10;
- tape->max_stages = 10;
-
idetape_get_inquiry_results(drive);
idetape_get_mode_sense_results(drive);
ide_tape_get_bsize_from_bdesc(drive);
tape->user_bs_factor = 1;
- tape->stage_size = *ctl * tape->blk_size;
- while (tape->stage_size > 0xffff) {
+ tape->buffer_size = *ctl * tape->blk_size;
+ while (tape->buffer_size > 0xffff) {
printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
*ctl /= 2;
- tape->stage_size = *ctl * tape->blk_size;
+ tape->buffer_size = *ctl * tape->blk_size;
}
- stage_size = tape->stage_size;
- tape->pages_per_stage = stage_size / PAGE_SIZE;
- if (stage_size % PAGE_SIZE) {
- tape->pages_per_stage++;
- tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
+ buffer_size = tape->buffer_size;
+ tape->pages_per_buffer = buffer_size / PAGE_SIZE;
+ if (buffer_size % PAGE_SIZE) {
+ tape->pages_per_buffer++;
+ tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
}
- /* Select the "best" DSC read/write polling freq and pipeline size. */
+ /* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
- tape->max_stages = speed * 1000 * 10 / tape->stage_size;
-
- /* Limit memory use for pipeline to 10% of physical memory */
- si_meminfo(&si);
- if (tape->max_stages * tape->stage_size >
- si.totalram * si.mem_unit / 10)
- tape->max_stages =
- si.totalram * si.mem_unit / (10 * tape->stage_size);
-
- tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
- tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
- tape->max_pipeline =
- min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
- if (tape->max_stages == 0) {
- tape->max_stages = 1;
- tape->min_pipeline = 1;
- tape->max_pipeline = 1;
- }
-
- t1 = (tape->stage_size * HZ) / (speed * 1000);
- tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
- tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
-
- if (tape->max_stages)
- t = tn;
- else
- t = t1;
+ t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
/*
* Ensure that the number we got makes sense; limit it within
@@ -3525,11 +2749,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
IDETAPE_DSC_RW_MIN);
printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
- "%dkB pipeline, %lums tDSC%s\n",
+ "%lums tDSC%s\n",
drive->name, tape->name, *(u16 *)&tape->caps[14],
- (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
- tape->stage_size / 1024,
- tape->max_stages * tape->stage_size / 1024,
+ (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
+ tape->buffer_size / 1024,
tape->best_dsc_rw_freq * 1000 / HZ,
drive->using_dma ? ", DMA":"");
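
With the pipeline heuristics removed, the DSC polling interval reduces to
t = (IDETAPE_FIFO_THRESHOLD * buffer_size * HZ) / (speed * 1000), clamped
between IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX as the hunk above shows.
A worked example under assumed values (threshold 2, HZ 250, 52 KiB buffer,
500 KB/s drive):

	#include <stdio.h>

	int main(void)
	{
		unsigned long threshold = 2, buffer_size = 52 * 1024;
		unsigned long hz = 250, speed = 500;	/* KB/s */
		unsigned long t =
			(threshold * buffer_size * hz) / (speed * 1000);

		printf("poll every %lu jiffies\n", t);	/* 53 ~ 212 ms */
		return 0;
	}
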
@@ -3553,7 +2776,7 @@ static void ide_tape_release(struct kref *kref)
ide_drive_t *drive = tape->drive;
struct gendisk *g = tape->disk;
- BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
+ BUG_ON(tape->merge_bh_size);
drive->dsc_overlap = 0;
drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a317ca9c46e5..9f9ad9fb6b89 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -36,6 +36,7 @@
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
struct ide_taskfile *tf = &task->tf;
u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
@@ -59,34 +60,33 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
SELECT_MASK(drive, 0);
if (task->tf_flags & IDE_TFLAG_OUT_DATA)
- hwif->OUTW((tf->hob_data << 8) | tf->data,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
- hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
- hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
- hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
+ hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
- hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
+ hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
- hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
+ hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
- hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(tf->feature, io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
- hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ hwif->OUTB(tf->nsect, io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
- hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
+ hwif->OUTB(tf->lbal, io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
- hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
+ hwif->OUTB(tf->lbam, io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
- hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
+ hwif->OUTB(tf->lbah, io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
hwif->OUTB((tf->device & HIHI) | drive->select.all,
- hwif->io_ports[IDE_SELECT_OFFSET]);
+ io_ports->device_addr);
}
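
The conversion from hwif->io_ports[OFFSET] indexing to named fields relies
on the register block being addressable both ways, as the mixed
io_ports.*_addr and io_ports_array[] accesses elsewhere in this patch
assume. A sketch of that aliasing (layout assumed, field names as in the
diff; requires C11 anonymous structs):

	#include <stdio.h>

	union ide_ports {
		struct {			/* C11 anonymous struct */
			unsigned long data_addr;
			unsigned long error_addr; /* feature on write */
			unsigned long nsect_addr;
			unsigned long lbal_addr;
			unsigned long lbam_addr;
			unsigned long lbah_addr;
			unsigned long device_addr;
			unsigned long status_addr; /* command on write */
			unsigned long ctl_addr;
			unsigned long irq_addr;
		};
		unsigned long array[10];
	};

	int main(void)
	{
		union ide_ports p = { 0 };
		int i;

		p.data_addr = 0x1f0;		/* named access */
		for (i = 1; i < 8; i++)
			p.array[i] = 0x1f0 + i;	/* array access */

		printf("status at %#lx\n", p.status_addr);	/* 0x1f7 */
		return 0;
	}
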
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -155,8 +155,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
switch (task->data_phase) {
case TASKFILE_MULTI_OUT:
case TASKFILE_OUT:
- hwif->OUTBSYNC(drive, tf->command,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
ndelay(400); /* FIXME */
return pre_task_out_intr(drive, task->rq);
case TASKFILE_MULTI_IN:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index bced02f9f2c3..999584c03d97 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -94,12 +94,6 @@ DEFINE_MUTEX(ide_cfg_mtx);
int noautodma = 0;
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_noacpi = 0;
-int ide_noacpitfs = 1;
-int ide_noacpionboot = 1;
-#endif
-
ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
static void ide_port_init_devices_data(ide_hwif_t *);
@@ -293,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
/**
* ide_unregister - free an IDE interface
- * @index: index of interface (will change soon to a pointer)
+ * @hwif: IDE interface
*
* Perform the final unregister of an IDE interface. At the moment
* we don't refcount interfaces so this will also get split up.
@@ -313,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
* This is raving bonkers.
*/
-void ide_unregister(unsigned int index)
+void ide_unregister(ide_hwif_t *hwif)
{
- ide_hwif_t *hwif, *g;
+ ide_hwif_t *g;
ide_hwgroup_t *hwgroup;
int irq_count = 0;
- BUG_ON(index >= MAX_HWIFS);
-
BUG_ON(in_interrupt());
BUG_ON(irqs_disabled());
mutex_lock(&ide_cfg_mtx);
spin_lock_irq(&ide_lock);
- hwif = &ide_hwifs[index];
if (!hwif->present)
goto abort;
__ide_port_unregister_devices(hwif);
@@ -366,7 +357,7 @@ void ide_unregister(unsigned int index)
ide_release_dma_engine(hwif);
/* restore hwif data to pristine status */
- ide_init_port_data(hwif, index);
+ ide_init_port_data(hwif, hwif->index);
abort:
spin_unlock_irq(&ide_lock);
@@ -377,7 +368,7 @@ EXPORT_SYMBOL(ide_unregister);
void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
{
- memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
+ memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
hwif->irq = hw->irq;
hwif->chipset = hw->chipset;
hwif->gendev.parent = hw->dev;
@@ -837,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
return 0; /* zero = nothing matched */
}
-extern int probe_ali14xx;
-extern int probe_umc8672;
-extern int probe_dtc2278;
-extern int probe_ht6560b;
-extern int probe_qd65xx;
-extern int cmd640_vlb;
-extern int probe_4drives;
-
-static int __initdata is_chipset_set;
-
/*
* ide_setup() gets called VERY EARLY during initialization,
* to handle kernel "command line" strings beginning with "hdx=" or "ide".
@@ -855,14 +836,12 @@ static int __initdata is_chipset_set;
*/
static int __init ide_setup(char *s)
{
- int i, vals[3];
ide_hwif_t *hwif;
ide_drive_t *drive;
unsigned int hw, unit;
+ int vals[3];
const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
- const char max_hwif = '0' + (MAX_HWIFS - 1);
-
if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */
return 0; /* driver and not us */
@@ -878,7 +857,7 @@ static int __init ide_setup(char *s)
printk(" : Enabled support for IDE doublers\n");
ide_doubler = 1;
- return 1;
+ goto obsolete_option;
}
#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
@@ -892,17 +871,17 @@ static int __init ide_setup(char *s)
if (!strcmp(s, "ide=noacpi")) {
//printk(" : Disable IDE ACPI support.\n");
ide_noacpi = 1;
- return 1;
+ goto obsolete_option;
}
if (!strcmp(s, "ide=acpigtf")) {
//printk(" : Enable IDE ACPI _GTF support.\n");
- ide_noacpitfs = 0;
- return 1;
+ ide_acpigtf = 1;
+ goto obsolete_option;
}
if (!strcmp(s, "ide=acpionboot")) {
//printk(" : Call IDE ACPI methods on boot.\n");
- ide_noacpionboot = 0;
- return 1;
+ ide_acpionboot = 1;
+ goto obsolete_option;
}
#endif /* CONFIG_BLK_DEV_IDEACPI */
@@ -912,7 +891,7 @@ static int __init ide_setup(char *s)
if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
const char *hd_words[] = {
"none", "noprobe", "nowerr", "cdrom", "nodma",
- "autotune", "noautotune", "-8", "-9", "-10",
+ "-6", "-7", "-8", "-9", "-10",
"noflush", "remap", "remap63", "scsi", NULL };
unit = s[2] - 'a';
hw = unit / MAX_DRIVES;
@@ -927,28 +906,22 @@ static int __init ide_setup(char *s)
case -1: /* "none" */
case -2: /* "noprobe" */
drive->noprobe = 1;
- goto done;
+ goto obsolete_option;
case -3: /* "nowerr" */
drive->bad_wstat = BAD_R_STAT;
- goto done;
+ goto obsolete_option;
case -4: /* "cdrom" */
drive->present = 1;
drive->media = ide_cdrom;
/* an ATAPI device ignores DRDY */
drive->ready_stat = 0;
- goto done;
+ goto obsolete_option;
case -5: /* nodma */
drive->nodma = 1;
- goto done;
- case -6: /* "autotune" */
- drive->autotune = IDE_TUNE_AUTO;
- goto obsolete_option;
- case -7: /* "noautotune" */
- drive->autotune = IDE_TUNE_NOAUTO;
goto obsolete_option;
case -11: /* noflush */
drive->noflush = 1;
- goto done;
+ goto obsolete_option;
case -12: /* "remap" */
drive->remap_0_to_1 = 1;
goto obsolete_option;
@@ -966,7 +939,7 @@ static int __init ide_setup(char *s)
drive->sect = drive->bios_sect = vals[2];
drive->present = 1;
drive->forced_geom = 1;
- goto done;
+ goto obsolete_option;
default:
goto bad_option;
}
@@ -984,126 +957,15 @@ static int __init ide_setup(char *s)
idebus_parameter = vals[0];
} else
printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
- goto done;
+ goto obsolete_option;
}
- /*
- * Look for interface options: "idex="
- */
- if (s[3] >= '0' && s[3] <= max_hwif) {
- /*
- * Be VERY CAREFUL changing this: note hardcoded indexes below
- * (-8, -9, -10) are reserved to ease the hardcoding.
- */
- static const char *ide_words[] = {
- "minus1", "serialize", "minus3", "minus4",
- "reset", "minus6", "ata66", "minus8", "minus9",
- "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
- "dtc2278", "umc8672", "ali14xx", NULL };
-
- hw = s[3] - '0';
- hwif = &ide_hwifs[hw];
- i = match_parm(&s[4], ide_words, vals, 3);
-
- /*
- * Cryptic check to ensure chipset not already set for hwif.
- * Note: we can't depend on hwif->chipset here.
- */
- if (i >= -18 && i <= -11) {
- /* chipset already specified */
- if (is_chipset_set)
- goto bad_option;
- /* these drivers are for "ide0=" only */
- if (hw != 0)
- goto bad_hwif;
- is_chipset_set = 1;
- printk("\n");
- }
-
- switch (i) {
-#ifdef CONFIG_BLK_DEV_ALI14XX
- case -17: /* "ali14xx" */
- probe_ali14xx = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_UMC8672
- case -16: /* "umc8672" */
- probe_umc8672 = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_DTC2278
- case -15: /* "dtc2278" */
- probe_dtc2278 = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_CMD640
- case -14: /* "cmd640_vlb" */
- cmd640_vlb = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_HT6560B
- case -13: /* "ht6560b" */
- probe_ht6560b = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_QD65XX
- case -12: /* "qd65xx" */
- probe_qd65xx = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_4DRIVES
- case -11: /* "four" drives on one set of ports */
- probe_4drives = 1;
- goto obsolete_option;
-#endif
- case -10: /* minus10 */
- case -9: /* minus9 */
- case -8: /* minus8 */
- case -6:
- case -4:
- case -3:
- goto bad_option;
- case -7: /* ata66 */
-#ifdef CONFIG_BLK_DEV_IDEPCI
- /*
- * Use ATA_CBL_PATA40_SHORT so drive side
- * cable detection is also overriden.
- */
- hwif->cbl = ATA_CBL_PATA40_SHORT;
- goto obsolete_option;
-#else
- goto bad_hwif;
-#endif
- case -5: /* "reset" */
- hwif->reset = 1;
- goto obsolete_option;
- case -2: /* "serialize" */
- hwif->mate = &ide_hwifs[hw^1];
- hwif->mate->mate = hwif;
- hwif->serialized = hwif->mate->serialized = 1;
- goto obsolete_option;
- case -1:
- case 0:
- case 1:
- case 2:
- case 3:
- goto bad_option;
- default:
- printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
- return 1;
- }
- }
bad_option:
printk(" -- BAD OPTION\n");
return 1;
obsolete_option:
printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n");
return 1;
-bad_hwif:
- printk("-- NOT SUPPORTED ON ide%d", hw);
-done:
- printk("\n");
- return 1;
}
EXPORT_SYMBOL(ide_lock);
@@ -1239,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev)
put_device(&hwif->gendev);
}
+int ide_vlb_clk;
+EXPORT_SYMBOL_GPL(ide_vlb_clk);
+
+module_param_named(vlb_clock, ide_vlb_clk, int, 0);
+MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
+
+int ide_pci_clk;
+EXPORT_SYMBOL_GPL(ide_pci_clk);
+
+module_param_named(pci_clock, ide_pci_clk, int, 0);
+MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
+
+static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+{
+ int a, b, i, j = 1;
+ unsigned int *dev_param_mask = (unsigned int *)kp->arg;
+
+ if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
+ sscanf(s, "%d.%d", &a, &b) != 2)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ *dev_param_mask |= (1 << i);
+ else
+		*dev_param_mask &= ~(1 << i);
+
+ return 0;
+}
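
Each of the masks below encodes one bit per device, addressed as
"<hwif>.<unit>" with an optional ":0" suffix to clear a previously set
bit; nodma=0.1, for instance, sets bit 1 (interface 0, unit 1). A
standalone sketch of the parsing (MAX_DRIVES assumed to be 2 here):

	#include <stdio.h>

	#define MAX_DRIVES 2

	int main(void)
	{
		unsigned int mask = 0;
		int a, b, j = 1;
		const char *s = "0.1";	/* as in nodma=0.1 */

		if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
		    sscanf(s, "%d.%d", &a, &b) != 2)
			return 1;

		if (j)
			mask |= 1 << (a * MAX_DRIVES + b);
		else
			mask &= ~(1 << (a * MAX_DRIVES + b));

		printf("mask=%#x\n", mask);	/* 0x2: hwif 0, unit 1 */
		return 0;
	}
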
+
+static unsigned int ide_nodma;
+
+module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+MODULE_PARM_DESC(nodma, "disallow DMA for a device");
+
+static unsigned int ide_noflush;
+
+module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+
+static unsigned int ide_noprobe;
+
+module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+MODULE_PARM_DESC(noprobe, "skip probing for a device");
+
+static unsigned int ide_nowerr;
+
+module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device");
+
+static unsigned int ide_cdroms;
+
+module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
+
+struct chs_geom {
+ unsigned int cyl;
+ u8 head;
+ u8 sect;
+};
+
+static unsigned int ide_disks;
+static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+
+static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+{
+ int a, b, c = 0, h = 0, s = 0, i, j = 1;
+
+ if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
+ sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (c > INT_MAX || h > 255 || s > 255)
+ return -EINVAL;
+
+ if (j)
+ ide_disks |= (1 << i);
+ else
+		ide_disks &= ~(1 << i);
+
+ ide_disks_chs[i].cyl = c;
+ ide_disks_chs[i].head = h;
+ ide_disks_chs[i].sect = s;
+
+ return 0;
+}
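
With the same addressing, chs=0.0:980,16,32 forces the first interface's
first device to be probed as a 980-cylinder, 16-head, 32-sector disk,
while chs=0.0:0 clears the override; when ide-core is built as a module
these are passed as ordinary module parameters (for example
"modprobe ide-core chs=0.0:980,16,32"; the module name is an assumption
here).
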
+
+module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
+MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
+
+static void ide_dev_apply_params(ide_drive_t *drive)
+{
+ int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit;
+
+ if (ide_nodma & (1 << i)) {
+ printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
+ drive->nodma = 1;
+ }
+ if (ide_noflush & (1 << i)) {
+ printk(KERN_INFO "ide: disabling flush requests for %s\n",
+ drive->name);
+ drive->noflush = 1;
+ }
+ if (ide_noprobe & (1 << i)) {
+ printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
+ drive->noprobe = 1;
+ }
+ if (ide_nowerr & (1 << i)) {
+ printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n",
+ drive->name);
+ drive->bad_wstat = BAD_R_STAT;
+ }
+ if (ide_cdroms & (1 << i)) {
+ printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
+ drive->present = 1;
+ drive->media = ide_cdrom;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ }
+ if (ide_disks & (1 << i)) {
+ drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
+ drive->head = drive->bios_head = ide_disks_chs[i].head;
+ drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
+ drive->forced_geom = 1;
+ printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
+ drive->name,
+ drive->cyl, drive->head, drive->sect);
+ drive->present = 1;
+ drive->media = ide_disk;
+ drive->ready_stat = READY_STAT;
+ }
+}
+
+static unsigned int ide_ignore_cable;
+
+static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+{
+ int i, j = 1;
+
+ if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
+ return -EINVAL;
+
+ if (i >= MAX_HWIFS || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ ide_ignore_cable |= (1 << i);
+ else
+		ide_ignore_cable &= ~(1 << i);
+
+ return 0;
+}
+
+module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
+MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
+
+void ide_port_apply_params(ide_hwif_t *hwif)
+{
+ int i;
+
+ if (ide_ignore_cable & (1 << hwif->index)) {
+ printk(KERN_INFO "ide: ignoring cable detection for %s\n",
+ hwif->name);
+ hwif->cbl = ATA_CBL_PATA40_SHORT;
+ }
+
+ for (i = 0; i < MAX_DRIVES; i++)
+ ide_dev_apply_params(&hwif->drives[i]);
+}
+
/*
 * This gets invoked once during initialization, to set *everything* up
*/
@@ -1305,11 +1346,6 @@ int __init init_module (void)
void __exit cleanup_module (void)
{
- int index;
-
- for (index = 0; index < MAX_HWIFS; ++index)
- ide_unregister(index);
-
proc_ide_destroy();
class_destroy(ide_port_class);
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index 6efbf947c6db..90c65cf97448 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -116,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
int time1, time2;
u8 param1, param2, param3, param4;
unsigned long flags;
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
/* calculate timing, according to PIO mode */
time1 = ide_pio_cycle_time(drive, pio);
@@ -202,7 +202,7 @@ static const struct ide_port_info ali14xx_port_info = {
.name = DRV_NAME,
.chipset = ide_ali14xx,
.port_ops = &ali14xx_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+ .host_flags = IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
@@ -220,7 +220,7 @@ static int __init ali14xx_probe(void)
return ide_legacy_device_add(&ali14xx_port_info, 0);
}
-int probe_ali14xx;
+static int probe_ali14xx;
module_param_named(probe, probe_ali14xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index f51433bce8e4..5c730e4dd735 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & 0x80))
return 0;
return 1;
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
/* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */
- z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]);
+ z_writeb(0, hwif->io_ports.irq_addr);
if (!(ch & 0x80))
return 0;
return 1;
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = base;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = base + 2 + i * 4;
+ hw->io_ports_array[i] = base + 2 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.ctl_addr = ctl;
+ hw->io_ports.irq_addr = irq_port;
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index f7c4ad1c57c0..af791a02a120 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -101,8 +101,7 @@ static const struct ide_port_info dtc2278_port_info __initdata = {
IDE_HFLAG_IO_32BIT |
/* disallow ->io_32bit changes */
IDE_HFLAG_NO_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE,
+ IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
@@ -131,7 +130,7 @@ static int __init dtc2278_probe(void)
return ide_legacy_device_add(&dtc2278_port_info, 0);
}
-int probe_dtc2278 = 0;
+static int probe_dtc2278;
module_param_named(probe, probe_dtc2278, bool, 0);
MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 5c19c422c5cc..56cdaa0eeea5 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -50,12 +50,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE;
+ hw->io_ports.data_addr = ATA_HD_BASE;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
+ hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL;
+ hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
hw->irq = IRQ_MFP_IDE;
hw->ack_intr = NULL;
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index a0c9601bdaf0..a9c2593a898c 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -63,6 +63,8 @@
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
int ide_doubler = 0; /* support IDE doublers? */
+module_param_named(doubler, ide_doubler, bool, 0);
+MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & GAYLE_IRQ_IDE))
return 0;
return 1;
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & GAYLE_IRQ_IDE))
return 0;
- (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]);
- z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]);
+ (void)z_readb(hwif->io_ports.status_addr);
+ z_writeb(0x7c, hwif->io_ports.irq_addr);
return 1;
}
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = base;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = base + 2 + i * 4;
+ hw->io_ports_array[i] = base + 2 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.ctl_addr = ctl;
+ hw->io_ports.irq_addr = irq_port;
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 702d8deb5780..4fe516df9f74 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -157,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive)
/*
* Set timing for this drive:
*/
- outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]);
- (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]);
+ outb(timing, hwif->io_ports.device_addr);
+ (void)inb(hwif->io_ports.status_addr);
#ifdef DEBUG
printk("ht6560b: %s: select=%#x timing=%#x\n",
drive->name, select, timing);
@@ -212,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
{
int active_time, recovery_time;
int active_cycles, recovery_cycles;
- int bus_speed = system_bus_clock();
-
+ int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+
if (pio) {
unsigned int cycle_time;
@@ -323,7 +323,7 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif)
hwif->drives[1].drive_data = t;
}
-int probe_ht6560b = 0;
+static int probe_ht6560b;
module_param_named(probe, probe_ht6560b, bool, 0);
MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
@@ -340,7 +340,6 @@ static const struct ide_port_info ht6560b_port_info __initdata = {
.port_ops = &ht6560b_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE |
IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO4,
};
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index 17f94d0cb539..ecae916a3385 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -6,7 +6,7 @@
#define DRV_NAME "ide-4drives"
-int probe_4drives;
+static int probe_4drives;
module_param_named(probe, probe_4drives, bool, 0);
MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 855e157b18d3..aa2ea3deac85 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -140,8 +140,8 @@ static void ide_detach(struct pcmcia_device *link)
ide_release(link);
- release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
- release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
+ release_region(hwif->io_ports.ctl_addr, 1);
+ release_region(hwif->io_ports.data_addr, 8);
kfree(info);
} /* ide_detach */
@@ -183,11 +183,7 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
hwif->port_ops = &idecs_port_ops;
@@ -390,7 +386,7 @@ void ide_release(struct pcmcia_device *link)
if (info->ndev) {
/* FIXME: if this fails we need to queue the cleanup somehow
-- need to investigate the required PCMCIA magic */
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
}
info->ndev = 0;
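This hunk, and the ide_platform.c, au1xxx-ide.c, scc_pata.c and delkin_cb.c hunks below, pass the hwif itself to ide_unregister() instead of its index. The prototype change this implies is simply (assumed from the call sites):

	/* assumed: ide_unregister() now takes the port, not an index */
	void ide_unregister(ide_hwif_t *hwif);

which lets callers drop the hwif->index lookup and, in the scc_pata.c hunk, the manual hwif->chipset reset as well.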
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 822f48b05c70..8279dc7ca4c0 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
unsigned long port = (unsigned long)base;
int i;
- hw->io_ports[IDE_DATA_OFFSET] = port;
+ hw->io_ports.data_addr = port;
port += (1 << pdata->ioport_shift);
- for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
+ for (i = 1; i <= 7;
i++, port += (1 << pdata->ioport_shift))
- hw->io_ports[i] = port;
+ hw->io_ports_array[i] = port;
- hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+ hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
@@ -120,7 +120,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev)
{
ide_hwif_t *hwif = pdev->dev.driver_data;
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
return 0;
}
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 26546d0afc7f..1f527bbf8d96 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
for (i = 0; i < 8; i++)
- hw->io_ports[i] = base + i * 4;
+ hw->io_ports_array[i] = base + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL;
+ hw->io_ports.ctl_addr = base + IDE_CONTROL;
hw->irq = irq;
hw->ack_intr = ack_intr;
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index f23999dd3d46..a3573d40b4b7 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,10 +80,10 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
for (i = 0; i < IDE_NR_PORTS; i++) {
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
- if ( i==0 )
- hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]);
+ if (i == 0)
+ hw->io_ports_array[i] = Q40_ISA_IO_W(base + offsets[i]);
else
- hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]);
+ hw->io_ports_array[i] = Q40_ISA_IO_B(base + offsets[i]);
}
hw->irq = irq;
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 15a99aae0cf9..6424af154325 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -11,11 +11,7 @@
*
* QDI QD6500/QD6580 EIDE controller fast support
*
- * Please set local bus speed using kernel parameter idebus
- * for example, "idebus=33" stands for 33Mhz VLbus
* To activate controller support, use "ide0=qd65xx"
- * To enable tuning, use "hda=autotune hdb=autotune"
- * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
*/
/*
@@ -114,17 +110,18 @@ static void qd65xx_select(ide_drive_t *drive)
static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
{
- u8 active_cycle,recovery_cycle;
+ int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+ u8 act_cyc, rec_cyc;
- if (system_bus_clock()<=33) {
- active_cycle = 9 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 9);
- recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15);
+ if (clk <= 33) {
+ act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
+ rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
} else {
- active_cycle = 8 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 1, 8);
- recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18);
+ act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
+ rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
}
- return((recovery_cycle<<4) | 0x08 | active_cycle);
+ return (rec_cyc << 4) | 0x08 | act_cyc;
}
/*
@@ -135,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery
static u8 qd6580_compute_timing (int active_time, int recovery_time)
{
- u8 active_cycle = 17 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 17);
- u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15);
+ int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+ u8 act_cyc, rec_cyc;
- return((recovery_cycle<<4) | active_cycle);
+ act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
+ rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
+
+ return (rec_cyc << 4) | act_cyc;
}
/*
@@ -322,8 +322,7 @@ static const struct ide_port_info qd65xx_port_info __initdata = {
.name = DRV_NAME,
.chipset = ide_qd65xx,
.host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE,
+ IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
@@ -399,7 +398,7 @@ static int __init qd_probe(int base)
return rc;
}
-int probe_qd65xx = 0;
+static int probe_qd65xx;
module_param_named(probe, probe_qd65xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index 17d515329fe0..b54a14a57755 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -130,7 +130,7 @@ static const struct ide_port_info umc8672_port_info __initdata = {
.name = DRV_NAME,
.chipset = ide_umc8672,
.port_ops = &umc8672_port_ops,
- .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+ .host_flags = IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
@@ -158,7 +158,7 @@ static int __init umc8672_probe(void)
return ide_legacy_device_add(&umc8672_port_info, 0);
}
-int probe_umc8672;
+static int probe_umc8672;
module_param_named(probe, probe_umc8672, bool, 0);
MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 3485a310c95b..296b9c674bae 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -502,12 +502,11 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
int i;
- unsigned long *ata_regs = hw->io_ports;
+ unsigned long *ata_regs = hw->io_ports_array;
/* FIXME? */
- for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
+ for (i = 0; i < 8; i++)
*ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
- }
/* set the Alternative Status register */
*ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
@@ -627,7 +626,7 @@ static int au_ide_remove(struct device *dev)
ide_hwif_t *hwif = dev_get_drvdata(dev);
_auide_hwif *ahwif = &auide_hwif;
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
iounmap((void *)ahwif->regbase);
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 112fe566bb93..68947626e4aa 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -113,10 +113,10 @@ static int __devinit swarm_ide_probe(struct device *dev)
hwif->chipset = ide_generic;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hwif->io_ports[i] =
+ for (i = 0; i <= 7; i++)
+ hwif->io_ports_array[i] =
(unsigned long)(base + ((0x1f0 + i) << 5));
- hwif->io_ports[IDE_CONTROL_OFFSET] =
+ hwif->io_ports.ctl_addr =
(unsigned long)(base + (0x3f6 << 5));
hwif->irq = K_INT_GB_IDE;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index ca16f37f9486..7f46c224b7c4 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -140,7 +140,7 @@ static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
{
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
if (bus_speed <= 33)
pci_set_drvdata(dev, (void *) aec6xxx_33_base);
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b5a3bc33e167..b36a22b8c213 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -38,8 +38,6 @@
#include <asm/io.h>
-#define DISPLAY_ALI_TIMINGS
-
/*
* ALi devices are not plug-in devices. Otherwise these static values would
* need to go. They ought to go away anyway
@@ -49,236 +47,6 @@ static u8 m5229_revision;
static u8 chip_is_1543c_e;
static struct pci_dev *isa_dev;
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-#include <linux/stat.h>
-#include <linux/proc_fs.h>
-
-static u8 ali_proc = 0;
-
-static struct pci_dev *bmide_dev;
-
-static char *fifo[4] = {
- "FIFO Off",
- "FIFO On ",
- "DMA mode",
- "PIO mode" };
-
-static char *udmaT[8] = {
- "1.5T",
- " 2T",
- "2.5T",
- " 3T",
- "3.5T",
- " 4T",
- " 6T",
- " 8T"
-};
-
-static char *channel_status[8] = {
- "OK ",
- "busy ",
- "DRQ ",
- "DRQ busy ",
- "error ",
- "error busy ",
- "error DRQ ",
- "error DRQ busy"
-};
-
-/**
- * ali_get_info - generate proc file for ALi IDE
- * @buffer: buffer to fill
- * @addr: address of user start in buffer
- * @offset: offset into 'file'
- * @count: buffer count
- *
- * Walks the Ali devices and outputs summary data on the tuning and
- * anything else that will help with debugging
- */
-
-static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
-{
- unsigned long bibma;
- u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp;
- char *q, *p = buffer;
-
- /* fetch rev. */
- pci_read_config_byte(bmide_dev, 0x08, &rev);
- if (rev >= 0xc1) /* M1543C or newer */
- udmaT[7] = " ???";
- else
- fifo[3] = " ??? ";
-
- /* first fetch bibma: */
-
- bibma = pci_resource_start(bmide_dev, 4);
-
- /*
- * at that point bibma+0x2 et bibma+0xa are byte
- * registers to investigate:
- */
- c0 = inb(bibma + 0x02);
- c1 = inb(bibma + 0x0a);
-
- p += sprintf(p,
- "\n Ali M15x3 Chipset.\n");
- p += sprintf(p,
- " ------------------\n");
- pci_read_config_byte(bmide_dev, 0x78, &reg53h);
- p += sprintf(p, "PCI Clock: %d.\n", reg53h);
-
- pci_read_config_byte(bmide_dev, 0x53, &reg53h);
- p += sprintf(p,
- "CD_ROM FIFO:%s, CD_ROM DMA:%s\n",
- (reg53h & 0x02) ? "Yes" : "No ",
- (reg53h & 0x01) ? "Yes" : "No " );
- pci_read_config_byte(bmide_dev, 0x74, &reg53h);
- p += sprintf(p,
- "FIFO Status: contains %d Words, runs%s%s\n\n",
- (reg53h & 0x3f),
- (reg53h & 0x40) ? " OVERWR" : "",
- (reg53h & 0x80) ? " OVERRD." : "." );
-
- p += sprintf(p,
- "-------------------primary channel"
- "-------------------secondary channel"
- "---------\n\n");
-
- pci_read_config_byte(bmide_dev, 0x09, &reg53h);
- p += sprintf(p,
- "channel status: %s"
- " %s\n",
- (reg53h & 0x20) ? "On " : "Off",
- (reg53h & 0x10) ? "On " : "Off" );
-
- p += sprintf(p,
- "both channels togth: %s"
- " %s\n",
- (c0&0x80) ? "No " : "Yes",
- (c1&0x80) ? "No " : "Yes" );
-
- pci_read_config_byte(bmide_dev, 0x76, &reg53h);
- p += sprintf(p,
- "Channel state: %s %s\n",
- channel_status[reg53h & 0x07],
- channel_status[(reg53h & 0x70) >> 4] );
-
- pci_read_config_byte(bmide_dev, 0x58, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5c, &reg5yh);
- p += sprintf(p,
- "Add. Setup Timing: %dT"
- " %dT\n",
- (reg5xh & 0x07) ? (reg5xh & 0x07) : 8,
- (reg5yh & 0x07) ? (reg5yh & 0x07) : 8 );
-
- pci_read_config_byte(bmide_dev, 0x59, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5d, &reg5yh);
- p += sprintf(p,
- "Command Act. Count: %dT"
- " %dT\n"
- "Command Rec. Count: %dT"
- " %dT\n\n",
- (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
- (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
- (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
- (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 );
-
- p += sprintf(p,
- "----------------drive0-----------drive1"
- "------------drive0-----------drive1------\n\n");
- p += sprintf(p,
- "DMA enabled: %s %s"
- " %s %s\n",
- (c0&0x20) ? "Yes" : "No ",
- (c0&0x40) ? "Yes" : "No ",
- (c1&0x20) ? "Yes" : "No ",
- (c1&0x40) ? "Yes" : "No " );
-
- pci_read_config_byte(bmide_dev, 0x54, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x55, &reg5yh);
- q = "FIFO threshold: %2d Words %2d Words"
- " %2d Words %2d Words\n";
- if (rev < 0xc1) {
- if ((rev == 0x20) &&
- (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) {
- p += sprintf(p, q, 8, 8, 8, 8);
- } else {
- p += sprintf(p, q,
- (reg5xh & 0x03) + 12,
- ((reg5xh & 0x30)>>4) + 12,
- (reg5yh & 0x03) + 12,
- ((reg5yh & 0x30)>>4) + 12 );
- }
- } else {
- int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4;
- int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4;
- int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4;
- int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4;
- p += sprintf(p, q, t1, t2, t3, t4);
- }
-
-#if 0
- p += sprintf(p,
- "FIFO threshold: %2d Words %2d Words"
- " %2d Words %2d Words\n",
- (reg5xh & 0x03) + 12,
- ((reg5xh & 0x30)>>4) + 12,
- (reg5yh & 0x03) + 12,
- ((reg5yh & 0x30)>>4) + 12 );
-#endif
-
- p += sprintf(p,
- "FIFO mode: %s %s %s %s\n",
- fifo[((reg5xh & 0x0c) >> 2)],
- fifo[((reg5xh & 0xc0) >> 6)],
- fifo[((reg5yh & 0x0c) >> 2)],
- fifo[((reg5yh & 0xc0) >> 6)] );
-
- pci_read_config_byte(bmide_dev, 0x5a, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5b, &reg5xh1);
- pci_read_config_byte(bmide_dev, 0x5e, &reg5yh);
- pci_read_config_byte(bmide_dev, 0x5f, &reg5yh1);
-
- p += sprintf(p,/*
- "------------------drive0-----------drive1"
- "------------drive0-----------drive1------\n")*/
- "Dt RW act. Cnt %2dT %2dT"
- " %2dT %2dT\n"
- "Dt RW rec. Cnt %2dT %2dT"
- " %2dT %2dT\n\n",
- (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
- (reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8,
- (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
- (reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8,
- (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
- (reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16,
- (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16,
- (reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 );
-
- p += sprintf(p,
- "-----------------------------------UDMA Timings"
- "--------------------------------\n\n");
-
- pci_read_config_byte(bmide_dev, 0x56, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x57, &reg5yh);
- p += sprintf(p,
- "UDMA: %s %s"
- " %s %s\n"
- "UDMA timings: %s %s"
- " %s %s\n\n",
- (reg5xh & 0x08) ? "OK" : "No",
- (reg5xh & 0x80) ? "OK" : "No",
- (reg5yh & 0x08) ? "OK" : "No",
- (reg5yh & 0x80) ? "OK" : "No",
- udmaT[(reg5xh & 0x07)],
- udmaT[(reg5xh & 0x70) >> 4],
- udmaT[reg5yh & 0x07],
- udmaT[(reg5yh & 0x70) >> 4] );
-
- return p-buffer; /* => must be less than 4k! */
-}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
/**
* ali_set_pio_mode - set host controller for PIO mode
* @drive: drive
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
int s_time, a_time, c_time;
u8 s_clc, a_clc, r_clc;
unsigned long flags;
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
int port = hwif->channel ? 0x5c : 0x58;
int portFIFO = hwif->channel ? 0x55 : 0x54;
u8 cd_dma_fifo = 0;
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
- if (!ali_proc) {
- ali_proc = 1;
- bmide_dev = dev;
- ide_pci_create_host_proc("ali", ali_get_info);
- }
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
local_irq_save(flags);
if (m5229_revision < 0xC2) {
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index f7c883808b02..efcf54338be7 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
* Determine the system bus clock.
*/
- amd_clock = system_bus_clock() * 1000;
+ amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
switch (amd_clock) {
case 33000: amd_clock = 33333; break;
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 25c2f1bd175f..aaf38109eaec 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -111,10 +111,7 @@
#define DRV_NAME "cmd640"
-/*
- * This flag is set in ide.c by the parameter: ide0=cmd640_vlb
- */
-int cmd640_vlb;
+static int cmd640_vlb;
/*
* CMD640 specific registers definition.
@@ -350,12 +347,12 @@ static int __init secondary_port_responding(void)
spin_lock_irqsave(&cmd640_lock, flags);
- outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET); /* select drive0 */
+ outb_p(0x0a, 0x176); /* select drive0 */
udelay(100);
- if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
- outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
+ if ((inb_p(0x176) & 0x1f) != 0x0a) {
+ outb_p(0x1a, 0x176); /* select drive1 */
udelay(100);
- if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
+ if ((inb_p(0x176) & 0x1f) != 0x1a) {
spin_unlock_irqrestore(&cmd640_lock, flags);
return 0; /* nothing responded */
}
@@ -383,6 +380,7 @@ static void cmd640_dump_regs(void)
}
#endif
+#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
/*
* Check whether prefetch is on for a drive,
* and initialize the unmask flags for safe operation.
@@ -403,9 +401,7 @@ static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
drive->no_io_32bit = 0;
}
}
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
+#else
/*
* Sets prefetch mode for a drive.
*/
@@ -462,34 +458,6 @@ static inline u8 pack_nibbles(u8 upper, u8 lower)
}
/*
- * This routine retrieves the initial drive timings from the chipset.
- */
-static void __init retrieve_drive_counts(unsigned int index)
-{
- u8 b;
-
- /*
- * Get the internal setup timing, and convert to clock count
- */
- b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
- switch (b) {
- case 0x00: b = 4; break;
- case 0x80: b = 3; break;
- case 0x40: b = 2; break;
- default: b = 5; break;
- }
- setup_counts[index] = b;
-
- /*
- * Get the active/recovery counts
- */
- b = get_cmd640_reg(drwtim_regs[index]);
- active_counts[index] = (b >> 4) ? (b >> 4) : 0x10;
- recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
-}
-
-
-/*
* This routine writes the prepared setup/active/recovery counts
* for a drive into the cmd640 chipset registers to activate them.
*/
@@ -555,7 +523,14 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
{
int setup_time, active_time, recovery_time, clock_time;
u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
- int bus_speed = system_bus_clock();
+ int bus_speed;
+
+ if (cmd640_vlb && ide_vlb_clk)
+ bus_speed = ide_vlb_clk;
+ else if (!cmd640_vlb && ide_pci_clk)
+ bus_speed = ide_pci_clk;
+ else
+ bus_speed = system_bus_clock();
if (pio_mode > 5)
pio_mode = 5;
@@ -679,7 +654,6 @@ static const struct ide_port_info cmd640_port_info __initdata = {
.chipset = ide_cmd640,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE |
IDE_HFLAG_ABUSE_PREFETCH |
IDE_HFLAG_ABUSE_FAST_DEVSEL,
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
@@ -862,29 +836,16 @@ static int __init cmd640x_init(void)
}
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- if (drive->autotune || ((index > 1) && second_port_toggled)) {
- /*
- * Reset timing to the slowest speed and turn off
- * prefetch. This way, the drive identify code has
- * a better chance.
- */
- setup_counts [index] = 4; /* max possible */
- active_counts [index] = 16; /* max possible */
- recovery_counts [index] = 16; /* max possible */
- program_drive_counts(drive, index);
- set_prefetch_mode(drive, index, 0);
- printk("cmd640: drive%d timings/prefetch cleared\n", index);
- } else {
- /*
- * Record timings/prefetch without changing them.
- * This preserves any prior BIOS setup.
- */
- retrieve_drive_counts (index);
- check_prefetch(drive, index);
- printk("cmd640: drive%d timings/prefetch(%s) preserved",
- index, drive->no_io_32bit ? "off" : "on");
- display_clocks(index);
- }
+ /*
+ * Reset timing to the slowest speed and turn off prefetch.
+ * This way, the drive identify code has a better chance.
+ */
+ setup_counts [index] = 4; /* max possible */
+ active_counts [index] = 16; /* max possible */
+ recovery_counts [index] = 16; /* max possible */
+ program_drive_counts(drive, index);
+ set_prefetch_mode(drive, index, 0);
+ printk("cmd640: drive%d timings/prefetch cleared\n", index);
#else
/*
* Set the drive unmask flags to match the prefetch setting
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 006fb62656bc..08674711d089 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant)
*/
static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int clock_time = 1000 / system_bus_clock();
+ struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock());
u8 cycle_count, active_count, recovery_count, drwtim;
static const u8 recovery_values[] =
{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
ide_pio_timings[pio].active_time);
setup_count = quantize_timing(ide_pio_timings[pio].setup_time,
- 1000 / system_bus_clock());
+ 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()));
/*
* The primary channel has individual address setup timing registers
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index e30eae5a01b6..77cc22c2ad45 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -18,8 +18,6 @@
* hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
* - this is my first linux driver, so there's probably a lot of room
* for optimizations and bug fixing, so feel free to do it.
- * - use idebus=xx parameter to set PCI bus speed - needed to calc
- * timings for PIO modes (default will be 40)
* - if using PIO mode it's a good idea to set the PIO mode and
* 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
* - I had some problems with my IBM DHEA with PIO modes < 2
@@ -136,7 +134,7 @@ static int calc_clk(int time, int bus_speed)
static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
{
int clk1, clk2;
- int bus_speed = system_bus_clock(); /* get speed of PCI bus */
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
/* we don't check against CY82C693's min and max speed,
* so you can play with the idebus=xx parameter
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index c7b7e0483287..b9e457996d0e 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -87,11 +87,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
hwif->port_ops = &delkin_cb_port_ops;
@@ -123,8 +119,7 @@ delkin_cb_remove (struct pci_dev *dev)
{
ide_hwif_t *hwif = pci_get_drvdata(dev);
- if (hwif)
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
pci_release_regions(dev);
pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 8c02961d0188..c929dadaaaff 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
}
} else
outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->io_ports.ctl_addr);
}
/*
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index e1b0c9a9ab9c..c13e299077ec 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -72,8 +72,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
base = pci_resource_start(pdev, port * 2) & ~3;
dmabase = pci_resource_start(pdev, 4) & ~3;
- superio_ide_status[port] = base + IDE_STATUS_OFFSET;
- superio_ide_select[port] = base + IDE_SELECT_OFFSET;
+ superio_ide_status[port] = base + 7;
+ superio_ide_select[port] = base + 6;
superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
/* Clear error/interrupt, enable dma */
@@ -231,12 +231,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
* SELECT_DRIVE() properly during first ide_probe_port().
*/
timeout = 10000;
- outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ outb(12, hwif->io_ports.ctl_addr);
udelay(10);
- outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ outb(8, hwif->io_ports.ctl_addr);
do {
udelay(50);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if (stat == 0xff)
break;
} while ((stat & BUSY_STAT) && --timeout);
@@ -244,7 +244,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
}
if (!using_inta)
- hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
hwif->irq = hwif->mate->irq; /* share IRQ with mate */
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 9edacba20ffb..6e99080497bf 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -53,8 +53,7 @@
* If you then set the second drive to another PIO, the old value
* (automatically selected) will be overridden by yours.
* There is a 25/33MHz switch in configuration
- * register, but driver is written for use at any frequency which get
- * (use idebus=xx to select PCI bus speed).
+ * register, but the driver is written for use at any frequency.
*
* Version 0.1, Nov 8, 1996
* by Jaromir Koutek, for 2.1.8.
@@ -210,7 +209,7 @@ static void compute_clocks(int pio, pio_clocks_t *clks)
{
if (pio != PIO_NOT_EXIST) {
int adr_setup, data_pls;
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
adr_setup = ide_pio_timings[pio].setup_time;
data_pls = ide_pio_timings[pio].active_time;
@@ -280,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
spin_lock_irqsave(&opti621_lock, flags);
- reg_base = hwif->io_ports[IDE_DATA_OFFSET];
+ reg_base = hwif->io_ports.data_addr;
/* allow Register-B */
outb(0xc0, reg_base + CNTRL_REG);
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 17cf86490d59..ad7cdf9060ca 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -334,7 +334,7 @@ static int scc_dma_end(ide_drive_t *drive)
/* errata A308 workaround: Step5 (check data loss) */
/* We don't check non ide_disk because it is limited to UDMA4 */
- if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+ if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
& ERR_STAT) &&
drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
reg = in_be32((void __iomem *)intsts_port);
@@ -438,7 +438,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
/* SCC errata A252,A308 workaround: Step4 */
- if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+ if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
& ERR_STAT) &&
(int_stat & INTSTS_INTRQ))
return 1;
@@ -534,8 +534,8 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
}
memset(&hw, 0, sizeof(hw));
- for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++)
- hw.io_ports[i] = ports->dma + 0x20 + i * 4;
+ for (i = 0; i <= 8; i++)
+ hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
hw.irq = dev->irq;
hw.dev = &dev->dev;
hw.chipset = ide_pci;
@@ -763,9 +763,8 @@ static void __devexit scc_remove(struct pci_dev *dev)
hwif->dmatable_cpu = NULL;
}
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
- hwif->chipset = ide_unknown;
iounmap((void*)ports->dma);
iounmap((void*)ports->ctl);
pci_release_selected_regions(dev, (1 << 2) - 1);
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 321a4e28ac19..63e28f4e6d3b 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
int i;
/* Registers are word (32 bit) aligned */
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = reg + i * 4;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = reg + i * 4;
if (ctrl_port)
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ hw->io_ports.ctl_addr = ctrl_port;
if (irq_port)
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.irq_addr = irq_port;
}
static void
sgiioc4_maskproc(ide_drive_t * drive, int mask)
{
writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
- (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]);
+ (void __iomem *)drive->hwif->io_ports.ctl_addr);
}
static int
sgiioc4_checkirq(ide_hwif_t * hwif)
{
unsigned long intr_addr =
- hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4;
+ hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
if ((u8)readl((void __iomem *)intr_addr) & 0x03)
return 1;
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive)
{
u32 intr_reg;
ide_hwif_t *hwif = HWIF(drive);
- unsigned long other_ir =
- hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
/* Code to check for PCI error conditions */
intr_reg = readl((void __iomem *)other_ir);
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive)
* a "clear" status if it got cleared. If not, then spin
* for a bit trying to clear it.
*/
- u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ u8 stat = sgiioc4_INB(io_ports->status_addr);
int count = 0;
- stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = sgiioc4_INB(io_ports->status_addr);
while ((stat & 0x80) && (count++ < 100)) {
udelay(1);
- stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = sgiioc4_INB(io_ports->status_addr);
}
if (intr_reg & 0x02) {
@@ -162,9 +162,9 @@ sgiioc4_clearirq(ide_drive_t * drive)
pci_stat_cmd_reg;
pci_err_addr_low =
- readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]);
+ readl((void __iomem *)io_ports->irq_addr);
pci_err_addr_high =
- readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4));
+ readl((void __iomem *)(io_ports->irq_addr + 4));
pci_read_config_dword(dev, PCI_COMMAND,
&pci_stat_cmd_reg);
printk(KERN_ERR
@@ -573,7 +573,6 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
.init_dma = ide_dma_sgiioc4,
.port_ops = &sgiioc4_port_ops,
.dma_ops = &sgiioc4_dma_ops,
- .host_flags = IDE_HFLAG_NO_AUTOTUNE,
.mwdma_mask = ATA_MWDMA2_ONLY,
};
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 1fffea3211bd..c2040a017f47 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -622,9 +622,10 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
struct pci_dev *dev = to_pci_dev(hwif->dev);
void *addr = pci_get_drvdata(dev);
u8 ch = hwif->channel;
- hw_regs_t hw;
unsigned long base;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+
/*
* Fill in the basic HWIF bits
*/
@@ -638,7 +639,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* based I/O
*/
- memset(&hw, 0, sizeof(hw_regs_t));
+ memset(io_ports, 0, sizeof(*io_ports));
base = (unsigned long)addr;
if (ch)
@@ -651,17 +652,15 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* so we can't currently use it sanely since we want to
* use LBA48 mode.
*/
- hw.io_ports[IDE_DATA_OFFSET] = base;
- hw.io_ports[IDE_ERROR_OFFSET] = base + 1;
- hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2;
- hw.io_ports[IDE_SECTOR_OFFSET] = base + 3;
- hw.io_ports[IDE_LCYL_OFFSET] = base + 4;
- hw.io_ports[IDE_HCYL_OFFSET] = base + 5;
- hw.io_ports[IDE_SELECT_OFFSET] = base + 6;
- hw.io_ports[IDE_STATUS_OFFSET] = base + 7;
- hw.io_ports[IDE_CONTROL_OFFSET] = base + 10;
-
- hw.io_ports[IDE_IRQ_OFFSET] = 0;
+ io_ports->data_addr = base;
+ io_ports->error_addr = base + 1;
+ io_ports->nsect_addr = base + 2;
+ io_ports->lbal_addr = base + 3;
+ io_ports->lbam_addr = base + 4;
+ io_ports->lbah_addr = base + 5;
+ io_ports->device_addr = base + 6;
+ io_ports->status_addr = base + 7;
+ io_ports->ctl_addr = base + 10;
if (pdev_is_sata(dev)) {
base = (unsigned long)addr;
@@ -672,8 +671,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
}
- memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
-
hwif->irq = dev->irq;
hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index 15ee38f7ad3f..a8a3138682ef 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -298,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
if (old != compat && old_mask == 0xff) {
/* leave lower 10 bits untouched */
compat += (next_offset += 0x400);
- hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2;
+ hwif->io_ports.ctl_addr = compat + 2;
outw(compat | 1, hwif->config_data);
new = inw(hwif->config_data);
printk(KERN_INFO "%s: control basereg workaround: "
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index bbd17bec6ffe..566e0ecb8db1 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
* Determine system bus clock.
*/
- via_clock = system_bus_clock() * 1000;
+ via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
switch (via_clock) {
case 33000: via_clock = 33333; break;
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index a82f6efb660b..f0e638dcc3ab 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -131,7 +131,7 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL;
#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
{
- unsigned long *p = hw->io_ports;
+ unsigned long *p = hw->io_ports_array;
int i;
typedef struct {
@@ -314,7 +314,7 @@ static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
#if defined(CONFIG_IDE_EXT_DIRECT)
static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
{
- unsigned long *p = hw->io_ports;
+ unsigned long *p = hw->io_ports_array;
int i;
u32 ide_phy_base;
@@ -811,24 +811,28 @@ static int __init mpc8xx_ide_probe(void)
#ifdef IDE0_BASE_OFFSET
memset(&hw, 0, sizeof(hw));
if (!m8xx_ide_init_ports(&hw, 0)) {
- ide_hwif_t *hwif = &ide_hwifs[0];
+ ide_hwif_t *hwif = ide_find_port();
- ide_init_port_hw(hwif, &hw);
- hwif->pio_mask = ATA_PIO4;
- hwif->port_ops = &m8xx_port_ops;
+ if (hwif) {
+ ide_init_port_hw(hwif, &hw);
+ hwif->pio_mask = ATA_PIO4;
+ hwif->port_ops = &m8xx_port_ops;
- idx[0] = 0;
+ idx[0] = hwif->index;
+ }
}
#ifdef IDE1_BASE_OFFSET
memset(&hw, 0, sizeof(hw));
if (!m8xx_ide_init_ports(&hw, 1)) {
- ide_hwif_t *mate = &ide_hwifs[1];
+ ide_hwif_t *mate = ide_find_port();
- ide_init_port_hw(mate, &hw);
- mate->pio_mask = ATA_PIO4;
- mate->port_ops = &m8xx_port_ops;
+ if (mate) {
+ ide_init_port_hw(mate, &hw);
+ mate->pio_mask = ATA_PIO4;
+ mate->port_ops = &m8xx_port_ops;
- idx[1] = 1;
+ idx[1] = mate->index;
+ }
}
#endif
#endif
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 185faa0dce94..3cac6b2790dd 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -417,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
#define PMAC_IDE_REG(x) \
- ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x)))
+ ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
/*
* Apply the timings of the proper unit (master/slave) to the shared
@@ -1086,8 +1086,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
int i;
for (i = 0; i < 8; ++i)
- hw->io_ports[i] = base + i * 0x10;
- hw->io_ports[8] = base + 0x160;
+ hw->io_ports_array[i] = base + i * 0x10;
+
+ hw->io_ports.ctl_addr = base + 0x160;
}
/*
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0d13fe0a260b..3d6d9461c31d 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -160,6 +160,7 @@ struct ehca_qp {
};
u32 qp_type;
enum ehca_ext_qp_type ext_type;
+ enum ib_qp_state state;
struct ipz_queue ipz_squeue;
struct ipz_queue ipz_rqueue;
struct h_galpas galpas;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b5ca94c6b8d9..ca5eb0cb628c 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
unsigned long flags;
WARN_ON_ONCE(!in_interrupt());
- if (ehca_debug_level)
+ if (ehca_debug_level >= 3)
ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65b3362cdb9b..65048976198c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -50,7 +50,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0025"
+#define HCAD_VERSION "0026"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
static int ehca_open_aqp1 = 0;
static int ehca_hw_level = 0;
static int ehca_poll_all_eqs = 1;
-static int ehca_mr_largepage = 1;
int ehca_debug_level = 0;
int ehca_nr_ports = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
int ehca_scaling_code = 0;
int ehca_lock_hcalls = -1;
-module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
-module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
-module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
-module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
-module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO);
-module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
-module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO);
-module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO);
+module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
+module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
+module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
+module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
+module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
+module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
+module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
+module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
+module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
MODULE_PARM_DESC(open_aqp1,
- "AQP1 on startup (0: no (default), 1: yes)");
+ "Open AQP1 on startup (default: no)");
MODULE_PARM_DESC(debug_level,
- "debug level"
- " (0: no debug traces (default), 1: with debug traces)");
+ "Amount of debug output (0: none (default), 1: traces, "
+ "2: some dumps, 3: lots)");
MODULE_PARM_DESC(hw_level,
- "hardware level"
- " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+ "Hardware level (0: autosensing (default), "
+ "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
MODULE_PARM_DESC(nr_ports,
"number of connected ports (-1: autodetect, 1: port one only, "
"2: two ports (default)");
MODULE_PARM_DESC(use_hp_mr,
- "high performance MRs (0: no (default), 1: yes)");
+ "Use high performance MRs (default: no)");
MODULE_PARM_DESC(port_act_time,
- "time to wait for port activation (default: 30 sec)");
+ "Time to wait for port activation (default: 30 sec)");
MODULE_PARM_DESC(poll_all_eqs,
- "polls all event queues periodically"
- " (0: no, 1: yes (default))");
+ "Poll all event queues periodically (default: yes)");
MODULE_PARM_DESC(static_rate,
- "set permanent static rate (default: disabled)");
+ "Set permanent static rate (default: no static rate)");
MODULE_PARM_DESC(scaling_code,
- "set scaling code (0: disabled/default, 1: enabled)");
-MODULE_PARM_DESC(mr_largepage,
- "use large page for MR (0: use PAGE_SIZE (default), "
- "1: use large page depending on MR size");
+ "Enable scaling code (default: no)");
MODULE_PARM_DESC(lock_hcalls,
- "serialize all hCalls made by the driver "
+ "Serialize all hCalls made by the driver "
"(default: autodetect)");
DEFINE_RWLOCK(ehca_qp_idr_lock);
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
u64 h_ret;
struct hipz_query_hca *rblock;
struct hipz_query_port *port;
+ const char *loc_code;
static const u32 pgsize_map[] = {
HCA_CAP_MR_PGSIZE_4K, 0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
HCA_CAP_MR_PGSIZE_16M, 0x1000000,
};
+ ehca_gen_dbg("Probing adapter %s...",
+ shca->ofdev->node->full_name);
+ loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+ if (loc_code)
+ ehca_gen_dbg(" ... location lode=%s", loc_code);
+
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
/* translate supported MR page sizes; always support 4K */
shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
- if (ehca_mr_largepage) { /* support extra sizes only if enabled */
- for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
- if (rblock->memory_page_size_supported & pgsize_map[i])
- shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
- }
+ for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+ if (rblock->memory_page_size_supported & pgsize_map[i])
+ shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
/* query max MTU from first port -- it's the same for all ports */
port = (struct hipz_query_port *)rblock;
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- ehca_debug_level);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
}
static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-static ssize_t ehca_show_mr_largepage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", ehca_mr_largepage);
-}
-static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
-
static struct attribute *ehca_dev_attrs[] = {
&dev_attr_adapter_handle.attr,
&dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
&dev_attr_cur_mw.attr,
&dev_attr_max_pd.attr,
&dev_attr_max_ah.attr,
- &dev_attr_mr_largepage.attr,
NULL
};
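The remaining ehca hunks replace boolean "if (ehca_debug_level)" tests with graded thresholds matching the new debug_level description above. A sketch of the convention as inferred from the call sites — the grading is this patch's, the wording of the comment is an interpretation:

	/* 0: errors only (default)
	 * 1: additionally trace error CQEs
	 * 2: additionally dump control blocks, WQEs and hCall arguments
	 * 3: dump everything, including per-packet and queue-empty traces */
	if (unlikely(ehca_debug_level >= 2))
		ehca_dmp(mqpcb, 4 * 70, "qp_num=%x", ibqp->qp_num);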
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f26997fc00f8..46ae4eb2c4e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
int t;
for (t = start_idx; t <= end_idx; t++) {
u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
- ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
- *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+ if (ehca_debug_level >= 3)
+ ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+ *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
ehca_gen_err("uncontiguous page found pgaddr=%lx "
"prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
pgaddr &
~(pginfo->hwpage_size - 1));
}
- ehca_gen_dbg("kpage=%lx chunk_page=%lx "
- "value=%016lx", *kpage, pgaddr,
- *(u64 *)abs_to_virt(
- phys_to_abs(pgaddr)));
+ if (ehca_debug_level >= 3) {
+ u64 val = *(u64 *)abs_to_virt(
+ phys_to_abs(pgaddr));
+ ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+ "value=%016lx",
+ *kpage, pgaddr, val);
+ }
prev_pgaddr = pgaddr;
i++;
pginfo->kpage_cnt++;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3eb14a52cbf2..57bef1152cc2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
spin_lock_init(&my_qp->spinlock_r);
my_qp->qp_type = qp_type;
my_qp->ext_type = parms.ext_type;
+ my_qp->state = IB_QPS_RESET;
if (init_attr->recv_cq)
my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue;
if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
/* no support for max_send/recv_sge yet */
}
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_QKEY)
my_qp->qkey = attr->qkey;
+ my_qp->state = qp_new_state;
+
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (qp_init_attr)
*qp_init_attr = my_qp->init_attr;
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
goto modify_srq_exit0;
}
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
srq_attr->srq_limit = EHCA_BMASK_GET(
MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
query_srq_exit1:
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index a20bbf466188..bbe0436f4f75 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
recv_wr->sg_list[cnt_ds].length;
}
- if (ehca_debug_level) {
+ if (ehca_debug_level >= 3) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
return -EINVAL;
}
- if (ehca_debug_level) {
+ if (ehca_debug_level >= 3) {
ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
}
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
int ret = 0;
unsigned long flags;
+ if (unlikely(my_qp->state != IB_QPS_RTS)) {
+ ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
+ return -EINVAL;
+ }
+
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_s, flags);
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
goto post_send_exit0;
}
wqe_cnt++;
- ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_send_wr */
post_send_exit0:
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
+ if (unlikely(ret || ehca_debug_level >= 2))
+ ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+ my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
goto post_recv_exit0;
}
wqe_cnt++;
- ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, my_qp->real_qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
post_recv_exit0:
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
+ if (unlikely(ret || ehca_debug_level >= 2))
+ ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+ my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
return ret;
}
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
struct ehca_qp *my_qp;
- int cqe_count = 0;
+ int cqe_count = 0, is_error;
poll_cq_one_read_cqe:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
ret = -EAGAIN;
- ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
- "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
- goto poll_cq_one_exit0;
+ if (ehca_debug_level >= 3)
+ ehca_dbg(cq->device, "Completion queue is empty "
+ "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
+ goto poll_cq_one_exit0;
}
/* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
ehca_dbg(cq->device,
"Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
cqe->local_qp_number,
cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
}
}
- /* tracing cqe */
- if (unlikely(ehca_debug_level)) {
+ is_error = cqe->status & WC_STATUS_ERROR_BIT;
+
+ /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
+ if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
ehca_dbg(cq->device,
- "Received COMPLETION ehca_cq=%p cq_num=%x -----",
- my_cq, my_cq->cq_number);
+ "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
+ is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
/* update also queue adder to throw away this entry!!! */
goto poll_cq_one_exit0;
}
+
/* eval ib_wc_status */
- if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+ if (unlikely(is_error)) {
/* complete with errors */
map_ib_wc_status(cqe->status, &wc->status);
wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
wc->imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
- if (unlikely(wc->status != IB_WC_SUCCESS))
- ehca_dbg(cq->device,
- "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
- "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
- "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
- cqe->status, cqe->local_qp_number,
- cqe->remote_qp_number, cqe->work_request_id, cqe);
-
poll_cq_one_exit0:
if (cqe_count > 0)
hipz_update_feca(my_cq, cqe_count);
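The post-send hunk above depends on the state field added to struct ehca_qp in ehca_classes.h: internal_create_qp() starts it at IB_QPS_RESET and internal_modify_qp() advances it on success, so the send path can reject work requests without querying firmware. A hedged sketch of the resulting lifecycle from a consumer's point of view (standard verbs; transitions abbreviated — a real consumer steps through INIT and RTR first):

	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTS };

	/* internal_modify_qp() caches qp_new_state on success ... */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);

	/* ... so ehca_post_send() only accepts WQEs once the QP is in RTS;
	 * before that it now fails fast with -EINVAL instead of posting to
	 * a queue the hardware would ignore. */
	if (!ret)
		ret = ib_post_send(qp, &send_wr, &bad_send_wr);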
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 1b07f2beafaf..e43ed8f8a0c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
break;
case 1: /* qp rqueue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
- qp->ib_qp.qp_num);
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
&qp->mm_count_rqueue);
if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
break;
case 2: /* qp squeue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
- qp->ib_qp.qp_num);
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
&qp->mm_count_squeue);
if (unlikely(ret)) {
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7029aa653751..5245e13c3a30 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
int i, sleep_msecs;
unsigned long flags = 0;
- ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
- opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
+ opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
opcode, ret, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
else
- ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
return ret;
}
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
int i, sleep_msecs;
unsigned long flags = 0;
- ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
- arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
- } else
+ } else if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
r_cb, /* r6 */
0, 0, 0, 0);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(query_port_response_block, 64, "response_block");
return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
- ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
- "vaddr=%lx length=%lx",
- (u32)PAGE_SIZE, access_ctrl, vaddr, length);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
5, /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
{
u64 ret;
- if (unlikely(ehca_debug_level >= 2)) {
+ if (unlikely(ehca_debug_level >= 3)) {
if (count > 1) {
u64 *kpage;
int i;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b6..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
@@ -250,7 +250,7 @@ err_mtt:
err_db:
if (!context)
- mlx4_ib_db_free(dev, &cq->db);
+ mlx4_db_free(dev->dev, &cq->db);
err_cq:
kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
ib_umem_release(mcq->umem);
} else {
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
- mlx4_ib_db_free(dev, &mcq->db);
+ mlx4_db_free(dev->dev, &mcq->db);
}
kfree(mcq);
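The mlx4 hunks here and below drop the IB driver's private doorbell-page allocator in favor of calls into the mlx4 core (note the dev->dev argument: the core routine takes the low-level mlx4_dev). The assumed core API mirroring the code removed from doorbell.c below:

	/* assumed mlx4 core replacements for the removed mlx4_ib_db_* code */
	int  mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
	void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);

Accordingly, struct mlx4_ib_db and the pgdir bookkeeping (pgdir_list, pgdir_mutex) disappear from mlx4_ib.h and main.c, and user-mapped doorbells now use struct mlx4_db directly.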
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
#include "mlx4_ib.h"
-struct mlx4_ib_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
- DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
- unsigned long *bits[2];
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
- struct mlx4_ib_db_pgdir *pgdir;
-
- pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
- if (!pgdir)
- return NULL;
-
- bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
- pgdir->bits[0] = pgdir->order0;
- pgdir->bits[1] = pgdir->order1;
- pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
- PAGE_SIZE, &pgdir->db_dma,
- GFP_KERNEL);
- if (!pgdir->db_page) {
- kfree(pgdir);
- return NULL;
- }
-
- return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
- struct mlx4_ib_db *db, int order)
-{
- int o;
- int i;
-
- for (o = order; o <= 1; ++o) {
- i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
- if (i < MLX4_IB_DB_PER_PAGE >> o)
- goto found;
- }
-
- return -ENOMEM;
-
-found:
- clear_bit(i, pgdir->bits[o]);
-
- i <<= o;
-
- if (o > order)
- set_bit(i ^ 1, pgdir->bits[order]);
-
- db->u.pgdir = pgdir;
- db->index = i;
- db->db = pgdir->db_page + db->index;
- db->dma = pgdir->db_dma + db->index * 4;
- db->order = order;
-
- return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
- struct mlx4_ib_db_pgdir *pgdir;
- int ret = 0;
-
- mutex_lock(&dev->pgdir_mutex);
-
- list_for_each_entry(pgdir, &dev->pgdir_list, list)
- if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
- goto out;
-
- pgdir = mlx4_ib_alloc_db_pgdir(dev);
- if (!pgdir) {
- ret = -ENOMEM;
- goto out;
- }
-
- list_add(&pgdir->list, &dev->pgdir_list);
-
- /* This should never fail -- we just allocated an empty page: */
- WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
- mutex_unlock(&dev->pgdir_mutex);
-
- return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
- int o;
- int i;
-
- mutex_lock(&dev->pgdir_mutex);
-
- o = db->order;
- i = db->index;
-
- if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
- clear_bit(i ^ 1, db->u.pgdir->order0);
- ++o;
- }
-
- i >>= o;
- set_bit(i, db->u.pgdir->bits[o]);
-
- if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
- dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
- db->u.pgdir->db_page, db->u.pgdir->db_dma);
- list_del(&db->u.pgdir->list);
- kfree(db->u.pgdir);
- }
-
- mutex_unlock(&dev->pgdir_mutex);
-}
-
struct mlx4_ib_user_db_page {
struct list_head list;
struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
};
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
- struct mlx4_ib_db *db)
+ struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
return err;
}
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
{
mutex_lock(&context->db_page_mutex);
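
The block deleted from doorbell.c above is a miniature two-order buddy allocator: each DMA-coherent page holds MLX4_IB_DB_PER_PAGE (PAGE_SIZE/4) doorbell records, order-0 requests take a single record, and order-1 requests take a naturally aligned pair. Claiming an order-0 record out of an order-1 slot splits the pair and frees the buddy, and mlx4_ib_db_free() merges buddies back and releases the page once it is wholly free. The logic moves into the mlx4 core rather than disappearing; condensed from the deleted lines:

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
		if (i < MLX4_IB_DB_PER_PAGE >> o)
			goto found;
	}
	return -ENOMEM;
found:
	clear_bit(i, pgdir->bits[o]);	/* claim the order-o slot          */
	i <<= o;			/* index in 4-byte record units    */
	if (o > order)			/* split: the buddy record is free */
		set_bit(i ^ 1, pgdir->bits[order]);
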
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac42202..4d61e32866c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_uar;
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
- INIT_LIST_HEAD(&ibdev->pgdir_list);
- mutex_init(&ibdev->pgdir_mutex);
-
ibdev->dev = dev;
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c155..5cf994794d25 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
-enum {
- MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
- __be32 *db;
- union {
- struct mlx4_ib_db_pgdir *pgdir;
- struct mlx4_ib_user_db_page *user_page;
- } u;
- dma_addr_t dma;
- int index;
- int order;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
struct mlx4_cq mcq;
struct mlx4_ib_cq_buf buf;
struct mlx4_ib_cq_resize *resize_buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
spinlock_t lock;
struct mutex resize_mutex;
struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
struct mlx4_qp mqp;
struct mlx4_buf buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
struct mlx4_ib_wq rq;
u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
struct ib_srq ibsrq;
struct mlx4_srq msrq;
struct mlx4_buf buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
u64 *wrid;
spinlock_t lock;
int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
struct mlx4_dev *dev;
void __iomem *uar_map;
- struct list_head pgdir_list;
- struct mutex pgdir_mutex;
-
struct mlx4_uar priv_uar;
u32 priv_pdn;
MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx4_ib_ah, ibah);
}
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
- struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+ struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e449..80ea8b9e7761 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (!init_attr->srq) {
- err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -580,7 +580,7 @@ err_buf:
err_db:
if (!pd->uobject && !init_attr->srq)
- mlx4_ib_db_free(dev, &qp->db);
+ mlx4_db_free(dev->dev, &qp->db);
err:
return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
kfree(qp->rq.wrid);
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
if (!qp->ibqp.srq)
- mlx4_ib_db_free(dev, &qp->db);
+ mlx4_db_free(dev->dev, &qp->db);
}
}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf58..204619702f9d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
@@ -200,7 +200,7 @@ err_buf:
err_db:
if (!pd->uobject)
- mlx4_ib_db_free(dev, &srq->db);
+ mlx4_db_free(dev->dev, &srq->db);
err_srq:
kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
kfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
- mlx4_ib_db_free(dev, &msrq->db);
+ mlx4_db_free(dev->dev, &msrq->db);
}
kfree(msrq);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b046262ed638..a4e9269a29bd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
addr = ntohl(ifa->ifa_address);
mask = ntohl(ifa->ifa_mask);
- nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
- addr, mask);
+ nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
+ ", netmask " NIPQUAD_FMT ".\n",
+ HIPQUAD(addr), HIPQUAD(mask));
list_for_each_entry(nesdev, &nes_dev_list, list) {
nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
*/
static void nes_print_macaddr(struct net_device *netdev)
{
- nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
- netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
- netdev->irq);
-}
+ DECLARE_MAC_BUF(mac);
+ nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+ netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
+}
/**
* nes_interrupt - handle interrupts
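
The nes.c hunks above (and the nes_cm.c, nes_nic.c, and nes_utils.c hunks below) swap hand-rolled "%02X..." dumps for the address-formatting helpers of this kernel generation: NIPQUAD_FMT with NIPQUAD() for network-order IPv4 addresses, HIPQUAD() for host-order ones, and DECLARE_MAC_BUF()/print_mac() for MAC addresses. A minimal sketch, where in_addr and packet_saddr are placeholder variables rather than driver names:

	DECLARE_MAC_BUF(mac);			/* scratch buffer for the string */
	u32 in_addr = ntohl(packet_saddr);	/* host order, hence HIPQUAD     */

	printk(KERN_DEBUG "ip " NIPQUAD_FMT " mac %s\n",
	       HIPQUAD(in_addr), print_mac(mac, netdev->dev_addr));
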
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d0738623bcf3..d940fc27129a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
/* get a handle on the hte */
hte = &cm_core->connected_nodes;
- nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
- loc_addr, loc_port, cm_core, hte);
+ nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
+ HIPQUAD(loc_addr), loc_port, cm_core, hte);
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
- dst_addr, dst_port);
+ nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
+ HIPQUAD(dst_addr), dst_port);
/* no listener */
return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
+ DECLARE_MAC_BUF(mac);
/* create an hte and cm_node for this instance */
cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->send_write0 = send_first;
- nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
- cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+ nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
+ HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+ HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
cm_node->listener = listener;
cm_node->netdev = nesvnic->netdev;
cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
/* copy the mac addr to node context */
memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
- nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
- " %02x, %02x, %02x, %02x, %02x\n",
- cm_node->rem_mac[0], cm_node->rem_mac[1],
- cm_node->rem_mac[2], cm_node->rem_mac[3],
- cm_node->rem_mac[4], cm_node->rem_mac[5]);
+ nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+ print_mac(mac, cm_node->rem_mac));
add_hte_node(cm_core, cm_node);
atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
- nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
- iph->daddr, tcph->dest, iph->saddr, tcph->source);
+ nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
+ ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
+ NIPQUAD(iph->daddr), tcph->dest,
+ NIPQUAD(iph->saddr), tcph->source);
/* note: this call is going to increment cm_node ref count */
cm_node = find_node(cm_core,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index aa53aab91bf8..08964cc7e98a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
return 0;
}
+
+ i = 0;
+ while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+ return 0;
+ }
}
/* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
}
}
-
-
- i = 0;
- while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
- mdelay(1);
- if (i >= 10000) {
- printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
- return 0;
- }
-
return port_count;
}
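
The two nes_hw.c hunks relocate, rather than change, the internal-CPU readiness poll: it now runs immediately after the full soft reset is confirmed and before the port resets, instead of at the end of nes_reset_adapter_ne020(), so a firmware CPU that never comes up is caught before the ports are touched. The bounded-poll idiom itself is untouched:

	i = 0;	/* give the embedded CPU up to ~10 s to report ready (0x80) */
	while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) &&
	       i++ < 10000)
		mdelay(1);
	if (i >= 10000)
		return 0;	/* zero ports: the caller treats it as failure */
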
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index b7e2844f096b..8f36e231bdf5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -905,7 +905,7 @@ struct nes_hw_qp {
};
struct nes_hw_cq {
- struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */
void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
dma_addr_t cq_pbase; /* PCI memory for host rings */
u16 cq_head;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 01cd0effc492..e5366b013c1a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
int i;
u32 macaddr_low;
u16 macaddr_high;
+ DECLARE_MAC_BUF(mac);
if (!is_valid_ether_addr(mac_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
- printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
- __func__, netdev->addr_len,
- mac_addr->sa_data[0], mac_addr->sa_data[1],
- mac_addr->sa_data[2], mac_addr->sa_data[3],
- mac_addr->sa_data[4], mac_addr->sa_data[5]);
+ printk(PFX "%s: Address length = %d, Address = %s\n",
+ __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
if (multicast_addr) {
- nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
- multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
- multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
- multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
- perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+ DECLARE_MAC_BUF(mac);
+ nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
+ print_mac(mac, multicast_addr->dmi_addr),
+ perfect_filter_register_address+(mc_index * 8),
+ mc_nic_index);
macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
macaddr_high += (u16)multicast_addr->dmi_addr[1];
macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index f9db07c2717d..c6d5631a6995 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
/* DELETE or RESOLVE */
if (arp_index == nesadapter->arp_table_size) {
- nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+ nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
+ HIPQUAD(ip_addr),
+ action == NES_ARP_RESOLVE ? "resolve" : "delete");
return -1;
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f9a5d4390892..ee74f7c7a6da 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
if (nescq->cq_mem_size)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
- (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+ nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
kfree(nescq);
return ret;
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
while (cqe_count < num_entries) {
if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
NES_CQE_VALID) {
+ /*
+ * Make sure we read CQ entry contents *after*
+ * we've checked the valid bit.
+ */
+ rmb();
+
cqe = nescq->hw_cq.cq_vbase[head];
nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
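
The rmb() added to nes_poll_cq() pairs with the volatile qualifier dropped from cq_vbase in the nes_hw.h hunk above: ordering is enforced where it matters instead of by pessimizing every access. Without the barrier, a weakly ordered CPU may satisfy the loads of the CQE payload before the load that observed the valid bit, delivering stale completion data. The producer/consumer pattern in sketch form (CQE_VALID and the field names are illustrative, not the driver's):

	/* producer (hardware or another CPU): */
	entry->data = payload;
	wmb();					/* publish data before flag */
	entry->flags = cpu_to_le32(CQE_VALID);

	/* consumer, as in the nes_poll_cq() hunk: */
	if (le32_to_cpu(entry->flags) & CQE_VALID) {
		rmb();			/* read payload only after the flag */
		payload = entry->data;
	}
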
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73b2b176ad0e..f1f142dc64b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
/* constants */
enum {
- IPOIB_PACKET_SIZE = 2048,
- IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
IPOIB_ENCAP_LEN = 4,
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
+
IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {
struct ipoib_rx_buf {
struct sk_buff *skb;
- u64 mapping;
+ u64 mapping[IPOIB_UD_RX_SG];
};
struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {
unsigned int admin_mtu;
unsigned int mcast_mtu;
+ unsigned int max_ib_mtu;
struct ipoib_rx_buf *rx_ring;
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
struct ib_send_wr tx_wr;
unsigned tx_outstanding;
+ struct ib_recv_wr rx_wr;
+ struct ib_sge rx_sge[IPOIB_UD_RX_SG];
+
struct ib_wc ibwc[IPOIB_NUM_WC];
struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
struct list_head list;
};
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+
+static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
+{
+ return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
+}
+
/*
* We stash a pointer to our private neighbour information after our
* hardware address in neigh->ha. The ALIGN() expression here makes
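
The new ipoib.h constants encode the receive layout needed for MTUs above one page. Worked through for the largest IB MTU: a UD receive must hold IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 (GRH) = 4136 bytes, which exceeds a 4096-byte page, so ipoib_ud_need_sg() is true and the buffer splits into a 44-byte head (IPOIB_UD_HEAD_SIZE = 40 GRH + 4 encap) plus one full page, 44 + 4096 = 4140 >= 4136; hence mapping[] grows to IPOIB_UD_RX_SG = 2 entries. As a snippet, assuming 4K pages:

	unsigned int mtu  = 4096;			/* IB_MTU_4096      */
	unsigned int buf  = IPOIB_UD_BUF_SIZE(mtu);	/* 4096 + 40 = 4136 */
	unsigned int head = IPOIB_UD_HEAD_SIZE;		/* 40 + 4    = 44   */

	if (ipoib_ud_need_sg(mtu))			/* 4136 > PAGE_SIZE  */
		BUG_ON(head + PAGE_SIZE < buf);	/* 4140 >= 4136: 2 SGEs fit */
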
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0205eb7c1bd3..7cf1fa7074ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ } else
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
+}
+
+static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
+ struct sk_buff *skb,
+ unsigned int length)
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int size;
+ /*
+		 * Only two buffers are needed for max_payload = 4K;
+		 * the first buffer's size is IPOIB_UD_HEAD_SIZE
+ */
+ skb->tail += IPOIB_UD_HEAD_SIZE;
+ skb->len += length;
+
+ size = length - IPOIB_UD_HEAD_SIZE;
+
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ } else
+ skb_put(skb, length);
+
+}
+
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_sge list;
- struct ib_recv_wr param;
struct ib_recv_wr *bad_wr;
int ret;
- list.addr = priv->rx_ring[id].mapping;
- list.length = IPOIB_BUF_SIZE;
- list.lkey = priv->mr->lkey;
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+ priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
- param.next = NULL;
- param.wr_id = id | IPOIB_OP_RECV;
- param.sg_list = &list;
- param.num_sge = 1;
- ret = ib_post_recv(priv->qp, &param, &bad_wr);
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
return ret;
}
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- u64 addr;
+ int buf_size;
+ u64 *mapping;
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
- if (!skb)
- return -ENOMEM;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ buf_size = IPOIB_UD_HEAD_SIZE;
+ else
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+ skb = dev_alloc_skb(buf_size + 4);
+ if (unlikely(!skb))
+ return NULL;
/*
* IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/
skb_reserve(skb, 4);
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
- dev_kfree_skb_any(skb);
- return -EIO;
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+ goto error;
+
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto partial_error;
+ skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+ mapping[1] =
+ ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+ goto partial_error;
}
- priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
+ priv->rx_ring[id].skb = skb;
+ return skb;
- return 0;
+partial_error:
+ ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ if (!ipoib_alloc_rx_skb(dev, i)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- u64 addr;
+ u64 mapping[IPOIB_UD_RX_SG];
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
goto repost;
+ memcpy(mapping, priv->rx_ring[wr_id].mapping,
+ IPOIB_UD_RX_SG * sizeof *mapping);
+
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
++dev->stats.rx_dropped;
goto repost;
}
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, mapping);
+ ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
- skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
- ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
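
In ipoib_ib_handle_rx_wc() the refill order is the point: the old DMA addresses are snapshotted before ipoib_alloc_rx_skb() overwrites rx_ring[wr_id].mapping, and on allocation failure the still-mapped old skb is reposted untouched, so the receive ring never shrinks under memory pressure. Condensed from the hunks above:

	u64 mapping[IPOIB_UD_RX_SG];

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;		/* old skb and mapping stay live */
	}
	ipoib_ud_dma_unmap_rx(priv, mapping);	/* old buffer now ours */
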
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02cf02b..7a4ed9d3d844 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+ if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
NETIF_F_LLTX |
NETIF_F_HIGHDMA);
- /* MTU will be reset when mcast join happens */
- dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
- priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
{
struct ipoib_dev_priv *priv;
struct ib_device_attr *device_attr;
+ struct ib_port_attr attr;
int result = -ENOMEM;
priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device);
+ if (!ib_query_port(hca, port, &attr))
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ else {
+ printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+ hca->name, port);
+ goto device_init_failed;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
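
ipoib_add_port() now sizes the interface from the HCA's reported per-port maximum instead of the old fixed 2048-byte packet size: ib_query_port() fills an ib_port_attr, and ib_mtu_enum_to_int() widens the max_mtu enum (e.g. IB_MTU_4096 becomes 4096). In sketch form, following the hunk above:

	struct ib_port_attr attr;

	if (ib_query_port(hca, port, &attr))
		goto device_init_failed;	/* cannot size the port */
	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	priv->dev->mtu	 = IPOIB_UD_MTU(priv->max_ib_mtu); /* minus 4-byte encap */
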
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 31a53c5bcb13..d00a2c174aee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
- priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
- IPOIB_ENCAP_LEN;
+ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
if (!ipoib_cm_admin_enabled(dev))
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 8a20e3742c43..07c03f178a49 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = 1,
- .max_recv_sge = 1
+ .max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
+ priv->rx_sge[0].lkey = priv->mr->lkey;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
+ priv->rx_sge[1].length = PAGE_SIZE;
+ priv->rx_sge[1].lkey = priv->mr->lkey;
+ priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
+ } else {
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+ }
+ priv->rx_wr.next = NULL;
+ priv->rx_wr.sg_list = priv->rx_sge;
+
return 0;
out_free_cq:
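
Filling priv->rx_wr and priv->rx_sge once here is what let ipoib_ib_post_receive() shrink earlier in the patch: the lengths, lkeys, and sg_list pointer are invariant per device, so each post only patches in the work-request id and the one or two buffer addresses. The division of labor, gathered from the two hunks:

	/* set once at init (this hunk, SG case): */
	priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
	priv->rx_sge[1].length = PAGE_SIZE;
	priv->rx_wr.sg_list    = priv->rx_sge;
	priv->rx_wr.num_sge    = IPOIB_UD_RX_SG;

	/* patched per post (ipoib_ib_post_receive() hunk): */
	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
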
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 293f5b892e3f..431fdeaa2dc4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
goto err;
}
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
priv->pkey = pkey;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4b07bdadb81e..b29e3affb805 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -444,6 +444,23 @@ exit:
__FUNCTION__, retval);
}
+static void xpad_bulk_out(struct urb *urb)
+{
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ break;
+ default:
+ dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ }
+}
+
#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
static void xpad_irq_out(struct urb *urb)
{
@@ -475,23 +492,6 @@ exit:
__FUNCTION__, retval);
}
-static void xpad_bulk_out(struct urb *urb)
-{
- switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
- break;
- default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
- }
-}
-
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
{
struct usb_endpoint_descriptor *ep_irq_out;
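
The xpad.c change only relocates xpad_bulk_out(), moving it out of the region guarded by CONFIG_JOYSTICK_XPAD_FF/CONFIG_JOYSTICK_XPAD_LEDS; the body is byte-for-byte identical. A plausible reading is that the callback is needed by code compiled regardless of those options, which would otherwise fail to build when both are off, though the caller is not visible in these hunks. Structurally:

	static void xpad_bulk_out(struct urb *urb);	/* now unconditional */

	#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
	static void xpad_irq_out(struct urb *urb);	/* still guarded */
	#endif
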
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 23ae66c76d47..24c6b7ca62be 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -350,7 +350,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
if (ncci == 0xffffffff || np->ncci == ncci) {
*pp = (*pp)->next;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = np->minorp) != 0) {
+ if ((mp = np->minorp) != NULL) {
#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
capifs_free_ncci(mp->minor);
#endif
@@ -366,7 +366,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
kfree(np);
- if (*pp == 0) return;
+ if (*pp == NULL) return;
} else {
pp = &(*pp)->next;
}
@@ -483,7 +483,7 @@ static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
#endif
goto bad;
}
- if ((nskb = gen_data_b3_resp_for(mp, skb)) == 0) {
+ if ((nskb = gen_data_b3_resp_for(mp, skb)) == NULL) {
printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
goto bad;
}
@@ -512,7 +512,7 @@ bad:
static void handle_minor_recv(struct capiminor *mp)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mp->inqueue)) != 0) {
+ while ((skb = skb_dequeue(&mp->inqueue)) != NULL) {
unsigned int len = skb->len;
mp->inbytes -= len;
if (handle_recv_skb(mp, skb) < 0) {
@@ -538,7 +538,7 @@ static int handle_minor_send(struct capiminor *mp)
return 0;
}
- while ((skb = skb_dequeue(&mp->outqueue)) != 0) {
+ while ((skb = skb_dequeue(&mp->outqueue)) != NULL) {
datahandle = mp->datahandle;
len = (u16)skb->len;
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
@@ -689,19 +689,19 @@ capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!cdev->ap.applid)
return -ENODEV;
- if ((skb = skb_dequeue(&cdev->recvqueue)) == 0) {
+ if ((skb = skb_dequeue(&cdev->recvqueue)) == NULL) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
for (;;) {
interruptible_sleep_on(&cdev->recvwait);
- if ((skb = skb_dequeue(&cdev->recvqueue)) != 0)
+ if ((skb = skb_dequeue(&cdev->recvqueue)) != NULL)
break;
if (signal_pending(current))
break;
}
- if (skb == 0)
+ if (skb == NULL)
return -ERESTARTNOHAND;
}
if (skb->len > count) {
@@ -940,12 +940,12 @@ capi_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
mutex_lock(&cdev->ncci_list_mtx);
- if ((nccip = capincci_find(cdev, (u32) ncci)) == 0) {
+ if ((nccip = capincci_find(cdev, (u32) ncci)) == NULL) {
mutex_unlock(&cdev->ncci_list_mtx);
return 0;
}
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = nccip->minorp) != 0) {
+ if ((mp = nccip->minorp) != NULL) {
count += atomic_read(&mp->ttyopencount);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
@@ -966,7 +966,7 @@ capi_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
mutex_lock(&cdev->ncci_list_mtx);
nccip = capincci_find(cdev, (u32) ncci);
- if (!nccip || (mp = nccip->minorp) == 0) {
+ if (!nccip || (mp = nccip->minorp) == NULL) {
mutex_unlock(&cdev->ncci_list_mtx);
return -ESRCH;
}
@@ -986,7 +986,7 @@ capi_open(struct inode *inode, struct file *file)
if (file->private_data)
return -EEXIST;
- if ((file->private_data = capidev_alloc()) == 0)
+ if ((file->private_data = capidev_alloc()) == NULL)
return -ENOMEM;
return nonseekable_open(inode, file);
@@ -1023,9 +1023,9 @@ static int capinc_tty_open(struct tty_struct * tty, struct file * file)
struct capiminor *mp;
unsigned long flags;
- if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == 0)
+ if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == NULL)
return -ENXIO;
- if (mp->nccip == 0)
+ if (mp->nccip == NULL)
return -ENXIO;
tty->driver_data = (void *)mp;
@@ -1058,7 +1058,7 @@ static void capinc_tty_close(struct tty_struct * tty, struct file * file)
#ifdef _DEBUG_REFCOUNT
printk(KERN_DEBUG "capinc_tty_close ocount=%d\n", atomic_read(&mp->ttyopencount));
#endif
- if (mp->nccip == 0)
+ if (mp->nccip == NULL)
capiminor_free(mp);
}
@@ -1526,9 +1526,9 @@ static int __init capi_init(void)
char *compileinfo;
int major_ret;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
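
The capi.c hunks begin a cleanup that repeats through capidrv.c, capilib.c, capiutil.c, kcapi.c, the AVM hardware drivers, and the eicon driver below: pointer tests spelled == 0 / != 0 become == NULL / != NULL. The change is behaviorally identical; it silences sparse's "Using plain integer as NULL pointer" warning and reads unambiguously. Condensed from handle_minor_recv() above:

	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mp->inqueue)) != NULL)	/* was: != 0 */
		handle_recv_skb(mp, skb);
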
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index cb42b690b45e..d5b4cc357a3c 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -335,7 +335,7 @@ static capidrv_plci *new_plci(capidrv_contr * card, int chan)
plcip = kzalloc(sizeof(capidrv_plci), GFP_ATOMIC);
- if (plcip == 0)
+ if (plcip == NULL)
return NULL;
plcip->state = ST_PLCI_NONE;
@@ -404,7 +404,7 @@ static inline capidrv_ncci *new_ncci(capidrv_contr * card,
nccip = kzalloc(sizeof(capidrv_ncci), GFP_ATOMIC);
- if (nccip == 0)
+ if (nccip == NULL)
return NULL;
nccip->ncci = ncci;
@@ -426,7 +426,7 @@ static inline capidrv_ncci *find_ncci(capidrv_contr * card, u32 ncci)
capidrv_plci *plcip;
capidrv_ncci *p;
- if ((plcip = find_plci_by_ncci(card, ncci)) == 0)
+ if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
return NULL;
for (p = plcip->ncci_list; p; p = p->next)
@@ -441,7 +441,7 @@ static inline capidrv_ncci *find_ncci_by_msgid(capidrv_contr * card,
capidrv_plci *plcip;
capidrv_ncci *p;
- if ((plcip = find_plci_by_ncci(card, ncci)) == 0)
+ if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
return NULL;
for (p = plcip->ncci_list; p; p = p->next)
@@ -755,7 +755,7 @@ static inline int new_bchan(capidrv_contr * card)
{
int i;
for (i = 0; i < card->nbchan; i++) {
- if (card->bchans[i].plcip == 0) {
+ if (card->bchans[i].plcip == NULL) {
card->bchans[i].disconnecting = 0;
return i;
}
@@ -877,7 +877,7 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
return;
}
bchan = &card->bchans[chan];
- if ((plcip = new_plci(card, chan)) == 0) {
+ if ((plcip = new_plci(card, chan)) == NULL) {
printk(KERN_ERR "capidrv-%d: incoming call: no memory, sorry.\n", card->contrnr);
return;
}
@@ -1388,12 +1388,12 @@ static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
_cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
if (cdb) {
- printk(KERN_DEBUG "%s: applid=%d %s\n", __FUNCTION__,
+ printk(KERN_DEBUG "%s: applid=%d %s\n", __func__,
ap->applid, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
- __FUNCTION__, ap->applid,
+ __func__, ap->applid,
capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
}
if (s_cmsg.Command == CAPI_DATA_B3
@@ -1661,7 +1661,7 @@ static int capidrv_command(isdn_ctrl * c, capidrv_contr * card)
NULL, /* Useruserdata */
NULL /* Facilitydataarray */
);
- if ((plcip = new_plci(card, (c->arg % card->nbchan))) == 0) {
+ if ((plcip = new_plci(card, (c->arg % card->nbchan))) == NULL) {
cmd.command = ISDN_STAT_DHUP;
cmd.driver = card->myid;
cmd.arg = (c->arg % card->nbchan);
@@ -1966,7 +1966,7 @@ static void enable_dchannel_trace(capidrv_contr *card)
card->name, errcode);
return;
}
- if (strstr(manufacturer, "AVM") == 0) {
+ if (strstr(manufacturer, "AVM") == NULL) {
printk(KERN_ERR "%s: not from AVM, no d-channel trace possible (%s)\n",
card->name, manufacturer);
return;
@@ -2291,10 +2291,10 @@ static int __init capidrv_init(void)
u32 ncontr, contr;
u16 errcode;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strncpy(rev, p + 2, sizeof(rev));
rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
@@ -2335,10 +2335,10 @@ static void __exit capidrv_exit(void)
char rev[32];
char *p;
- if ((p = strchr(revision, ':')) != 0) {
+ if ((p = strchr(revision, ':')) != NULL) {
strncpy(rev, p + 1, sizeof(rev));
rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != 0)
+ if ((p = strchr(rev, '$')) != NULL)
*p = 0;
} else {
strcpy(rev, " ??? ");
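
capidrv.c also starts the other mechanical conversion that runs through the hisax hunks below: __FUNCTION__, a gcc extension, becomes __func__, the predefined identifier standardized by C99. The two expand to the same function name under gcc, so there is no behavioral change:

	printk(KERN_DEBUG "%s: applid=%d\n", __func__, ap->applid);
	/* identical output to the old __FUNCTION__ spelling under gcc */
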
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 6d7c47ec0367..550e80f390a6 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -69,6 +69,7 @@ static int capifs_remount(struct super_block *s, int *flags, char *data)
} else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
mode = n & ~S_IFMT;
else {
+ kfree(new_opt);
printk("capifs: called with bogus options\n");
return -EINVAL;
}
@@ -189,9 +190,9 @@ static int __init capifs_init(void)
char *p;
int err;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
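
The capifs_remount() hunk plugs a small leak: the function duplicates the mount-option string into new_opt before the parsing loop (via kstrdup in the surrounding code, not shown in this hunk), and the bogus-options branch returned without releasing it. The general shape of the fix, with parse_option() standing in for the real sscanf-based checks:

	char *new_opt = kstrdup(data, GFP_KERNEL);

	while ((this_char = strsep(&this_opt, ",")) != NULL) {
		if (!parse_option(this_char)) {
			kfree(new_opt);	/* was leaked on this path */
			printk("capifs: called with bogus options\n");
			return -EINVAL;
		}
	}
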
diff --git a/drivers/isdn/capi/capilib.c b/drivers/isdn/capi/capilib.c
index 68409d971e73..fcaa1241ee77 100644
--- a/drivers/isdn/capi/capilib.c
+++ b/drivers/isdn/capi/capilib.c
@@ -4,7 +4,7 @@
#include <linux/isdn/capilli.h>
#define DBG(format, arg...) do { \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
} while (0)
struct capilib_msgidqueue {
@@ -44,7 +44,7 @@ static inline void mq_init(struct capilib_ncci * np)
static inline int mq_enqueue(struct capilib_ncci * np, u16 msgid)
{
struct capilib_msgidqueue *mq;
- if ((mq = np->msgidfree) == 0)
+ if ((mq = np->msgidfree) == NULL)
return 0;
np->msgidfree = mq->next;
mq->msgid = msgid;
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 22379b94e88f..ebef4ce1b00c 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -450,7 +450,7 @@ static void pars_2_message(_cmsg * cmsg)
cmsg->l += 4;
break;
case _CSTRUCT:
- if (*(u8 **) OFF == 0) {
+ if (*(u8 **) OFF == NULL) {
*(cmsg->m + cmsg->l) = '\0';
cmsg->l++;
} else if (**(_cstruct *) OFF != 0xff) {
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index f55531869313..75726ea0fbbd 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -10,7 +10,7 @@
*
*/
-#define CONFIG_AVMB1_COMPAT
+#define AVMB1_COMPAT
#include "kcapi.h"
#include <linux/module.h>
@@ -29,7 +29,7 @@
#include <asm/uaccess.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
#include <linux/b1lli.h>
#endif
#include <linux/mutex.h>
@@ -154,7 +154,7 @@ static void register_appl(struct capi_ctr *card, u16 applid, capi_register_param
if (card)
card->register_appl(card, applid, rparam);
else
- printk(KERN_WARNING "%s: cannot get card resources\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: cannot get card resources\n", __func__);
}
@@ -178,7 +178,7 @@ static void notify_up(u32 contr)
printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);
}
if (!card) {
- printk(KERN_WARNING "%s: invalid contr %d\n", __FUNCTION__, contr);
+ printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
return;
}
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
@@ -740,7 +740,7 @@ u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
EXPORT_SYMBOL(capi20_get_profile);
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
static int old_capi_manufacturer(unsigned int cmd, void __user *data)
{
avmb1_loadandconfigdef ldef;
@@ -826,7 +826,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
card = capi_ctr_get(card);
if (!card)
return -ESRCH;
- if (card->load_firmware == 0) {
+ if (card->load_firmware == NULL) {
printk(KERN_DEBUG "kcapi: load: no load function\n");
return -ESRCH;
}
@@ -835,7 +835,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
return -EINVAL;
}
- if (ldef.t4file.data == 0) {
+ if (ldef.t4file.data == NULL) {
printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
return -EINVAL;
}
@@ -904,7 +904,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
struct capi_ctr *card;
switch (cmd) {
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
case AVMB1_LOAD:
case AVMB1_LOAD_AND_CONFIG:
case AVMB1_RESETCARD:
@@ -951,7 +951,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
if (strcmp(driver->name, cdef.driver) == 0)
break;
}
- if (driver == 0) {
+ if (driver == NULL) {
printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
cdef.driver);
return -ESRCH;
@@ -1004,9 +1004,9 @@ static int __init kcapi_init(void)
return ret;
kcapi_proc_init();
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
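
Renaming CONFIG_AVMB1_COMPAT to AVMB1_COMPAT moves the macro out of the CONFIG_ namespace, which is reserved for symbols generated from Kconfig: kbuild's dependency tracking treats every CONFIG_ reference as a Kconfig option, so a hand-rolled #define with that prefix both misleads readers and pollutes the dependency data. The replacement is a plain local feature switch:

	#define AVMB1_COMPAT	/* local switch, deliberately not CONFIG_* */

	#ifdef AVMB1_COMPAT
	/* legacy AVM B1 ioctl interface */
	#endif
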
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 1cb2c40f9921..244711f7f838 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -17,7 +17,7 @@
#ifdef KCAPI_DEBUG
#define DBG(format, arg...) do { \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
} while (0)
#else
#define DBG(format, arg...) /* */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4484a6417235..abf05ec31760 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -661,11 +661,11 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
len += sprintf(page+len, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr);
- if ((s = cinfo->version[VER_DRIVER]) != 0)
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != 0)
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -788,9 +788,9 @@ static int __init b1_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 669f6f67449c..da34b98e3de7 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -883,11 +883,11 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
default: s = "???"; break;
}
len += sprintf(page+len, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != 0)
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != 0)
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -970,9 +970,9 @@ static int __init b1dma_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 80fb488848b8..1e288eeb5e2a 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -203,9 +203,9 @@ static int __init b1isa_init(void)
char rev[32];
int i;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index 90e2e6643d19..5b314a2c4049 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -382,9 +382,9 @@ static int __init b1pci_init(void)
char rev[32];
int err;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/isdn/hardware/avm/b1pcmcia.c
index e479c0aef38d..7740403b40e1 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/isdn/hardware/avm/b1pcmcia.c
@@ -201,9 +201,9 @@ static int __init b1pcmcia_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 4bbbbe688077..9df1d3f66c87 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1088,11 +1088,11 @@ static int c4_read_proc(char *page, char **start, off_t off,
default: s = "???"; break;
}
len += sprintf(page+len, "%-16s %s\n", "type", s);
- if ((s = cinfo->version[VER_DRIVER]) != 0)
+ if ((s = cinfo->version[VER_DRIVER]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+ if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != 0)
+ if ((s = cinfo->version[VER_SERIAL]) != NULL)
len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -1167,7 +1167,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
}
card->mbase = ioremap(card->membase, 128);
- if (card->mbase == 0) {
+ if (card->mbase == NULL) {
printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
card->membase);
retval = -EIO;
@@ -1291,9 +1291,9 @@ static int __init c4_init(void)
char rev[32];
int err;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 6130724e46e7..e7724493738c 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -551,9 +551,9 @@ static int __init t1isa_init(void)
char *p;
int i;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index d1e253c94db4..e6d298d75146 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -233,9 +233,9 @@ static int __init t1pci_init(void)
char rev[32];
int err;
- if ((p = strchr(revision, ':')) != 0 && p[1]) {
+ if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != 0 && p > rev)
+ if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p-1) = 0;
} else
strcpy(rev, "1.0");
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 6d39f9360766..5fcbdccd7a53 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -393,7 +393,7 @@ void diva_free_dma_map(void *hdev, struct _diva_dma_map_entry *pmap)
dma_addr_t dma_handle;
void *addr_handle;
- for (i = 0; (pmap != 0); i++) {
+ for (i = 0; (pmap != NULL); i++) {
diva_get_dma_map_entry(pmap, i, &cpu_addr, &phys_addr);
if (!cpu_addr) {
break;
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1ff98e7eb794..599fed88222d 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -742,7 +742,7 @@ static void start_internal_command (dword Id, PLCI *plci, t_std_internal_comma
else
{
i = 1;
- while (plci->internal_command_queue[i] != 0)
+ while (plci->internal_command_queue[i] != NULL)
i++;
plci->internal_command_queue[i] = command_function;
}
@@ -758,7 +758,7 @@ static void next_internal_command (dword Id, PLCI *plci)
plci->internal_command = 0;
plci->internal_command_queue[0] = NULL;
- while (plci->internal_command_queue[1] != 0)
+ while (plci->internal_command_queue[1] != NULL)
{
for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
plci->internal_command_queue[i] = plci->internal_command_queue[i+1];
@@ -9119,7 +9119,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, byte ho
dbug(1,dprintf("AdvSigPlci=0x%x",a->AdvSignalPLCI));
return 0x2001; /* codec in use by another application */
}
- if(plci!=0)
+ if(plci!=NULL)
{
a->AdvSignalPLCI = plci;
plci->tel=ADV_VOICE;
@@ -9144,7 +9144,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, byte ho
}
/* indicate D-ch connect if */
} /* codec is connected OK */
- if(plci!=0)
+ if(plci!=NULL)
{
a->AdvSignalPLCI = plci;
plci->tel=ADV_VOICE;
@@ -9170,7 +9170,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, byte ho
{
if(hook_listen) return 0x300B; /* Facility not supported */
/* no hook with SCOM */
- if(plci!=0) plci->tel = CODEC;
+ if(plci!=NULL) plci->tel = CODEC;
dbug(1,dprintf("S/SCOM codec"));
/* first time we use the scom-s codec we must shut down the internal */
/* handset application of the card. This can be done by an assign with */
@@ -14604,7 +14604,7 @@ static void channel_xmit_extended_xon (PLCI * plci) {
int max_ch = ARRAY_SIZE(a->ch_flow_control);
int i, one_requested = 0;
- if ((!plci) || (!plci->Id) || ((a = plci->adapter) == 0)) {
+ if ((!plci) || (!plci->Id) || ((a = plci->adapter) == NULL)) {
return;
}
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
index b96f3184c2e5..1f879b500d83 100644
--- a/drivers/isdn/hisax/asuscom.c
+++ b/drivers/isdn/hisax/asuscom.c
@@ -344,7 +344,7 @@ setup_asuscom(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 0f1db1f669b2..7cabc5a19492 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -797,7 +797,7 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
err = pnp_activate_dev(pnp_avm_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
cs->hw.avm.cfg_reg =
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 2d670856d141..018bd293e580 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -1088,7 +1088,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 2c3691fda300..aa29d1cf16af 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -937,7 +937,7 @@ setup_elsa_isapnp(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index f4a213877e35..d92e8d6c2ae2 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1417,7 +1417,7 @@ setup_hfcsx(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 98b0149bca68..8df889b0c1a9 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -905,7 +905,7 @@ rx_int_complete(struct urb *urb)
if (status) {
printk(KERN_INFO
"HFC-S USB: %s error resubmitting URB fifo(%d)\n",
- __FUNCTION__, fifon);
+ __func__, fifon);
}
}
@@ -1543,14 +1543,14 @@ hfc_usb_disconnect(struct usb_interface *intf)
stop_isoc_chain(&context->fifos[i]);
DBG(HFCUSB_DBG_INIT,
"HFC-S USB: %s stopping ISOC chain Fifo(%i)",
- __FUNCTION__, i);
+ __func__, i);
}
} else {
if (context->fifos[i].active > 0) {
context->fifos[i].active = 0;
DBG(HFCUSB_DBG_INIT,
"HFC-S USB: %s unlinking URB for Fifo(%i)",
- __FUNCTION__, i);
+ __func__, i);
}
usb_kill_urb(context->fifos[i].urb);
usb_free_urb(context->fifos[i].urb);
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index 909d6709ec16..cf082665cc8b 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -193,7 +193,7 @@ setup_hfcs(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/hisax_debug.h b/drivers/isdn/hisax/hisax_debug.h
index ceafecdb1037..5ed3b1c44184 100644
--- a/drivers/isdn/hisax/hisax_debug.h
+++ b/drivers/isdn/hisax/hisax_debug.h
@@ -27,14 +27,14 @@
#define DBG(level, format, arg...) do { \
if (level & __debug_variable) \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
} while (0)
#define DBG_PACKET(level,data,count) \
- if (level & __debug_variable) dump_packet(__FUNCTION__,data,count)
+ if (level & __debug_variable) dump_packet(__func__,data,count)
#define DBG_SKB(level,skb) \
- if ((level & __debug_variable) && skb) dump_packet(__FUNCTION__,skb->data,skb->len)
+ if ((level & __debug_variable) && skb) dump_packet(__func__,skb->data,skb->len)
static void __attribute__((unused))
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 76043dedba5b..c0b4db2f8364 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -68,7 +68,7 @@ static struct pci_device_id fcpci_ids[] = {
MODULE_DEVICE_TABLE(pci, fcpci_ids);
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
static struct pnp_device_id fcpnp_ids[] __devinitdata = {
{
.id = "AVM0900",
@@ -914,7 +914,7 @@ static int __devinit fcpci_probe(struct pci_dev *pdev,
return retval;
}
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct fritz_adapter *adapter;
@@ -935,7 +935,7 @@ static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_i
pnp_disable_dev(pdev);
retval = pnp_activate_dev(pdev);
if (retval < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __FUNCTION__,
+ printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __func__,
(char *)dev_id->driver_data, retval);
goto err_free;
}
@@ -974,6 +974,8 @@ static struct pnp_driver fcpnp_driver = {
.remove = __devexit_p(fcpnp_remove),
.id_table = fcpnp_ids,
};
+#else
+static struct pnp_driver fcpnp_driver;
#endif
static void __devexit fcpci_remove(struct pci_dev *pdev)
@@ -1001,7 +1003,7 @@ static int __init hisax_fcpcipnp_init(void)
retval = pci_register_driver(&fcpci_driver);
if (retval)
return retval;
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
retval = pnp_register_driver(&fcpnp_driver);
if (retval < 0) {
pci_unregister_driver(&fcpci_driver);
@@ -1013,7 +1015,7 @@ static int __init hisax_fcpcipnp_init(void)
static void __exit hisax_fcpcipnp_exit(void)
{
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
pnp_unregister_driver(&fcpnp_driver);
#endif
pci_unregister_driver(&fcpci_driver);
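
Besides switching the guard from the compiler-defined __ISAPNP__ to the Kconfig-driven CONFIG_PNP, the hunk above adds an empty fcpnp_driver definition in the #else branch. That is a common compile-out idiom: a placeholder object lets any remaining references build when the subsystem is configured out, without sprinkling more #ifdefs. A simplified userspace model, with FEATURE_PNP standing in for the real config symbol:

	#include <stdio.h>

	struct driver { const char *name; };

	#ifdef FEATURE_PNP
	static struct driver pnp_driver = { .name = "fcpcipnp" };
	#else
	static struct driver pnp_driver;	/* placeholder, never registered */
	#endif

	int main(void)
	{
		printf("driver name: %s\n",
		       pnp_driver.name ? pnp_driver.name : "(unset)");
		return 0;
	}
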
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
index 2d18d4f1e57e..a92bf0d2cab2 100644
--- a/drivers/isdn/hisax/ix1_micro.c
+++ b/drivers/isdn/hisax/ix1_micro.c
@@ -252,7 +252,7 @@ setup_ix1micro(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index 421b8e6763d7..ef00633e1d2a 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -255,7 +255,7 @@ int __devinit setup_niccy(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err < 0) {
printk(KERN_WARNING "%s: pnp_activate_dev "
- "ret(%d)\n", __FUNCTION__, err);
+ "ret(%d)\n", __func__, err);
return 0;
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 95425f3d2220..a10dfa82c734 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -555,7 +555,7 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[1] = pnp_port_start(pnp_d, 0);
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 04416bad611d..2044e7173ab4 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -218,13 +218,13 @@ enum {
#define L1_EVENT_COUNT (EV_TIMER3 + 1)
#define ERR(format, arg...) \
-printk(KERN_ERR "%s:%s: " format "\n" , __FILE__, __FUNCTION__ , ## arg)
+printk(KERN_ERR "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
#define WARN(format, arg...) \
-printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__, __FUNCTION__ , ## arg)
+printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
#define INFO(format, arg...) \
-printk(KERN_INFO "%s:%s: " format "\n" , __FILE__, __FUNCTION__ , ## arg)
+printk(KERN_INFO "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
#include "isdnhdlc.h"
#include "fsm.h"
@@ -406,7 +406,7 @@ struct st5481_adapter {
/*
* Submit an URB with error reporting. This is a macro so
- * the __FUNCTION__ returns the caller function name.
+ * __func__ expands to the caller's function name.
*/
#define SUBMIT_URB(urb, mem_flags) \
({ \
@@ -470,7 +470,7 @@ extern int st5481_debug;
#ifdef CONFIG_HISAX_DEBUG
#define DBG_ISO_PACKET(level,urb) \
- if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb)
+ if (level & __debug_variable) dump_iso_packet(__func__,urb)
static void __attribute__((unused))
dump_iso_packet(const char *name, struct urb *urb)
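
The SUBMIT_URB comment above points at a second GNU C feature: the macro body is a statement expression, ({ ... }), which lets a multi-statement block yield a value while __func__ still expands in the calling function. A hedged sketch of the same shape (TRY and do_io are illustrative, not from the driver):

	#include <stdio.h>

	/* GNU statement-expression macro: the ({ ... }) block yields a
	 * value, and __func__ expands at the call site, so errors are
	 * reported against the caller rather than a helper function. */
	#define TRY(expr)						\
	({								\
		int __ret = (expr);					\
		if (__ret < 0)						\
			fprintf(stderr, "%s: failed (%d)\n",		\
				__func__, __ret);			\
		__ret;							\
	})

	static int do_io(int fail)
	{
		return fail ? -5 : 0;
	}

	static void caller(void)
	{
		if (TRY(do_io(1)) < 0)	/* prints "caller: failed (-5)" */
			return;
	}

	int main(void)
	{
		caller();
		return 0;
	}
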
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 4ada66b8b679..427a8b0520f5 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -342,7 +342,7 @@ void st5481_release_usb(struct st5481_adapter *adapter)
usb_kill_urb(intr->urb);
kfree(intr->urb->transfer_buffer);
usb_free_urb(intr->urb);
- ctrl->urb = NULL;
+ intr->urb = NULL;
}
/*
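
The one-line st5481_usb.c fix above cures a copy-and-paste bug: the interrupt URB was freed, but the control endpoint's pointer was the one cleared, leaving intr->urb dangling at a freed address. A minimal demonstration of why that matters (plain C, hypothetical structures):

	#include <stdio.h>
	#include <stdlib.h>

	struct endpoint { void *urb; };

	int main(void)
	{
		struct endpoint intr = { malloc(16) };
		struct endpoint ctrl = { NULL };

		free(intr.urb);
		ctrl.urb = NULL;	/* the original bug: wrong field cleared */
		/* intr.urb still holds the freed address here, so a later
		 * "if (intr.urb) reuse(intr.urb)" check would pass and touch
		 * freed memory. The fix clears the pointer that was freed: */
		intr.urb = NULL;

		printf("intr.urb=%p ctrl.urb=%p\n", intr.urb, ctrl.urb);
		return 0;
	}
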
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index 6a5e379e0774..5dc9f1a43629 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -301,7 +301,7 @@ setup_teles3(struct IsdnCard *card)
err = pnp_activate_dev(pnp_d);
if (err<0) {
printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __FUNCTION__, err);
+ __func__, err);
return(0);
}
card->para[3] = pnp_port_start(pnp_d, 2);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index d4ad6992f776..0f3c66de69bc 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1924,7 +1924,7 @@ isdn_free_channel(int di, int ch, int usage)
if ((di < 0) || (ch < 0)) {
printk(KERN_WARNING "%s: called with invalid drv(%d) or channel(%d)\n",
- __FUNCTION__, di, ch);
+ __func__, di, ch);
return;
}
for (i = 0; i < ISDN_MAX_CHANNELS; i++)
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
index bc2f0dd962ea..be4949715d55 100644
--- a/drivers/isdn/i4l/isdn_net.h
+++ b/drivers/isdn/i4l/isdn_net.h
@@ -108,7 +108,7 @@ static __inline__ void isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *
lp = nd->queue;
// printk(KERN_DEBUG "%s: lp:%s(%p) nlp:%s(%p) last(%p)\n",
-// __FUNCTION__, lp->name, lp, nlp->name, nlp, lp->last);
+// __func__, lp->name, lp, nlp->name, nlp, lp->last);
nlp->last = lp->last;
lp->last->next = nlp;
lp->last = nlp;
@@ -129,7 +129,7 @@ static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
master_lp = (isdn_net_local *) lp->master->priv;
// printk(KERN_DEBUG "%s: lp:%s(%p) mlp:%s(%p) last(%p) next(%p) mndq(%p)\n",
-// __FUNCTION__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue);
+// __func__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue);
spin_lock_irqsave(&master_lp->netdev->queue_lock, flags);
lp->last->next = lp->next;
lp->next->last = lp->last;
@@ -141,7 +141,7 @@ static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
}
lp->next = lp->last = lp; /* (re)set own pointers */
// printk(KERN_DEBUG "%s: mndq(%p)\n",
-// __FUNCTION__, master_lp->netdev->queue);
+// __func__, master_lp->netdev->queue);
spin_unlock_irqrestore(&master_lp->netdev->queue_lock, flags);
}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 9f5fe372f83d..127cfdad68e7 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -110,7 +110,7 @@ isdn_ppp_free(isdn_net_local * lp)
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return 0;
}
@@ -127,7 +127,7 @@ isdn_ppp_free(isdn_net_local * lp)
#endif /* CONFIG_ISDN_MPP */
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return 0;
}
is = ippp_table[lp->ppp_slot];
@@ -226,7 +226,7 @@ isdn_ppp_wakeup_daemon(isdn_net_local * lp)
{
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return;
}
ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
@@ -245,7 +245,7 @@ isdn_ppp_closewait(int slot)
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: slot(%d) out of range\n",
- __FUNCTION__, slot);
+ __func__, slot);
return 0;
}
is = ippp_table[slot];
@@ -343,7 +343,7 @@ isdn_ppp_release(int min, struct file *file)
is = file->private_data;
if (!is) {
- printk(KERN_ERR "%s: no file->private_data\n", __FUNCTION__);
+ printk(KERN_ERR "%s: no file->private_data\n", __func__);
return;
}
if (is->debug & 0x1)
@@ -353,7 +353,7 @@ isdn_ppp_release(int min, struct file *file)
isdn_net_dev *p = is->lp->netdev;
if (!p) {
- printk(KERN_ERR "%s: no lp->netdev\n", __FUNCTION__);
+ printk(KERN_ERR "%s: no lp->netdev\n", __func__);
return;
}
is->state &= ~IPPP_CONNECT; /* -> effect: no call of wakeup */
@@ -1080,7 +1080,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
if (net_dev->local->ppp_slot < 0) {
printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
- __FUNCTION__, net_dev->local->ppp_slot);
+ __func__, net_dev->local->ppp_slot);
goto drop_packet;
}
if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) {
@@ -1107,7 +1107,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
skb_old->len);
if (net_dev->local->ppp_slot < 0) {
printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
- __FUNCTION__, net_dev->local->ppp_slot);
+ __func__, net_dev->local->ppp_slot);
goto drop_packet;
}
pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
@@ -1553,7 +1553,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
if (lp->ppp_slot < 0) {
printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return(-EINVAL);
}
@@ -1604,7 +1604,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
slot = lp->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
stats->frame_drops++;
dev_kfree_skb(skb);
spin_unlock_irqrestore(&mp->lock, flags);
@@ -1641,7 +1641,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
slot = lpq->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
- __FUNCTION__, lpq->ppp_slot);
+ __func__, lpq->ppp_slot);
} else {
u32 lls = ippp_table[slot]->last_link_seqno;
if (MP_LT(lls, minseq))
@@ -1875,7 +1875,7 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return;
}
if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
@@ -2655,7 +2655,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
lp->ppp_slot);
if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __FUNCTION__, lp->ppp_slot);
+ __func__, lp->ppp_slot);
return;
}
is = ippp_table[lp->ppp_slot];
@@ -2665,7 +2665,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
int slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: slot(%d) out of range\n",
- __FUNCTION__, slot);
+ __func__, slot);
return;
}
mis = ippp_table[slot];
@@ -2829,7 +2829,7 @@ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct
return;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __FUNCTION__, slot);
+ __func__, slot);
return;
}
is = ippp_table[slot];
@@ -2852,7 +2852,7 @@ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct
slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
printk(KERN_ERR "%s: slot(%d) out of range\n",
- __FUNCTION__, slot);
+ __func__, slot);
return;
}
mis = ippp_table[slot];
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 133eb18e65cc..8af0df1d5b8c 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1347,7 +1347,7 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
modem_info *info = (modem_info *) tty->driver_data;
u_char control, status;
- if (isdn_tty_paranoia_check(info, tty->name, __FUNCTION__))
+ if (isdn_tty_paranoia_check(info, tty->name, __func__))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
@@ -1372,7 +1372,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
{
modem_info *info = (modem_info *) tty->driver_data;
- if (isdn_tty_paranoia_check(info, tty->name, __FUNCTION__))
+ if (isdn_tty_paranoia_check(info, tty->name, __func__))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
@@ -1608,7 +1608,7 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_open"))
return -ENODEV;
if (!try_module_get(info->owner)) {
- printk(KERN_WARNING "%s: cannot reserve module\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: cannot reserve module\n", __func__);
return -ENODEV;
}
#ifdef ISDN_DEBUG_MODEM_OPEN
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 50377e5dc2a3..6f65883aef12 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -78,7 +78,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
event = dm_uevent_alloc(md);
if (!event) {
- DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
+ DMERR("%s: dm_uevent_alloc() failed", __func__);
goto err_nomem;
}
@@ -86,32 +86,32 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
DMERR("%s: add_uevent_var() for DM_TARGET failed",
- __FUNCTION__);
+ __func__);
goto err_add;
}
if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
DMERR("%s: add_uevent_var() for DM_ACTION failed",
- __FUNCTION__);
+ __func__);
goto err_add;
}
if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
dm_next_uevent_seq(md))) {
DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
- __FUNCTION__);
+ __func__);
goto err_add;
}
if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
- DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
+ DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
goto err_add;
}
if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
nr_valid_paths)) {
DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
- __FUNCTION__);
+ __func__);
goto err_add;
}
@@ -146,25 +146,25 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
if (dm_copy_name_and_uuid(event->md, event->name,
event->uuid)) {
DMERR("%s: dm_copy_name_and_uuid() failed",
- __FUNCTION__);
+ __func__);
goto uevent_free;
}
if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
DMERR("%s: add_uevent_var() for DM_NAME failed",
- __FUNCTION__);
+ __func__);
goto uevent_free;
}
if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
DMERR("%s: add_uevent_var() for DM_UUID failed",
- __FUNCTION__);
+ __func__);
goto uevent_free;
}
r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
if (r)
- DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
+ DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
dm_uevent_free(event);
}
@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
struct dm_uevent *event;
if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
- DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
+ DMERR("%s: Invalid event_type %d", __func__, event_type);
goto out;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5ebfb4d79901..87620b705bee 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -731,9 +731,9 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
else
rdev->desc_nr = sb->this_disk.number;
- if (refdev == 0)
+ if (!refdev) {
ret = 1;
- else {
+ } else {
__u64 ev1, ev2;
mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
@@ -1116,9 +1116,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
- if (refdev == 0)
+ if (!refdev) {
ret = 1;
- else {
+ } else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb =
(struct mdp_superblock_1*)page_address(refdev->sb_page);
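
The two superblock-loading hunks above are pure style: pointers are tested with !refdev rather than refdev == 0, and once the else branch carries braces the if branch gets them too, per the kernel's CodingStyle. For illustration only:

	#include <stdio.h>

	static int load_super(const void *refdev)
	{
		int ret;

		if (!refdev) {		/* preferred over refdev == 0 */
			ret = 1;	/* first device seen */
		} else {
			ret = 0;	/* would compare event counters here */
		}
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", load_super(NULL), load_super(""));
		return 0;
	}
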
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3f299d835a2b..42ee1a2dc144 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -244,7 +244,8 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
conf->working_disks--;
mddev->degraded++;
printk(KERN_ALERT "multipath: IO failure on %s,"
- " disabling IO path. \n Operation continuing"
+ " disabling IO path.\n"
+ "multipath: Operation continuing"
" on %d IO paths.\n",
bdevname (rdev->bdev,b),
conf->working_disks);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ff61b309129a..9fd473a6dbf5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1008,8 +1008,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
- " Operation continuing on %d devices\n",
+ printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
+ "raid1: Operation continuing on %d devices.\n",
bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 32389d2f18fc..1e96aa3ff513 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1001,8 +1001,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. \n"
- " Operation continuing on %d devices\n",
+ printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
+ "raid10: Operation continuing on %d devices.\n",
bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}
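
The raid1/raid10 (and later raid5) message changes above fix two problems at once: the stray space before the newline, and the unprefixed continuation line. A printk() string containing an embedded "\n" is split into separate log lines, and only the first carries the given level and subsystem tag, so the patch repeats the "raidN: " prefix on the second line. The resulting shape, as a kernel-style sketch (report_failure is illustrative):

	#include <linux/kernel.h>

	static void report_failure(const char *dev, int remaining)
	{
		printk(KERN_ALERT
		       "raid10: Disk failure on %s, disabling device.\n"
		       "raid10: Operation continuing on %d devices.\n",
		       dev, remaining);
	}
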
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b162b839a662..968dacaced6d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -63,6 +63,7 @@
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
#define IO_THRESHOLD 1
+#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
@@ -398,6 +399,7 @@ static void ops_run_io(struct stripe_head *sh)
might_sleep();
+ set_bit(STRIPE_IO_STARTED, &sh->state);
for (i = disks; i--; ) {
int rw;
struct bio *bi;
@@ -433,7 +435,7 @@ static void ops_run_io(struct stripe_head *sh)
bi->bi_bdev = rdev->bdev;
pr_debug("%s: for %llu schedule op %ld on disc %d\n",
- __FUNCTION__, (unsigned long long)sh->sector,
+ __func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset;
@@ -520,7 +522,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
raid5_conf_t *conf = sh->raid_conf;
int i;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
/* clear completed biofills */
@@ -569,7 +571,7 @@ static void ops_run_biofill(struct stripe_head *sh)
raid5_conf_t *conf = sh->raid_conf;
int i;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = sh->disks; i--; ) {
@@ -600,7 +602,7 @@ static void ops_complete_compute5(void *stripe_head_ref)
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
set_bit(R5_UPTODATE, &tgt->flags);
@@ -625,7 +627,7 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
int i;
pr_debug("%s: stripe %llu block: %d\n",
- __FUNCTION__, (unsigned long long)sh->sector, target);
+ __func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
for (i = disks; i--; )
@@ -653,7 +655,7 @@ static void ops_complete_prexor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
@@ -670,7 +672,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
@@ -699,7 +701,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
*/
int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
@@ -744,7 +746,7 @@ static void ops_complete_postxor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
@@ -757,7 +759,7 @@ static void ops_complete_write(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
int disks = sh->disks, i, pd_idx = sh->pd_idx;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
@@ -787,7 +789,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
unsigned long flags;
dma_async_tx_callback callback;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
/* check if prexor is active which means only process blocks
@@ -837,7 +839,7 @@ static void ops_complete_check(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
int pd_idx = sh->pd_idx;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
@@ -859,7 +861,7 @@ static void ops_run_check(struct stripe_head *sh)
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
- pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
@@ -1260,8 +1262,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
printk (KERN_ALERT
- "raid5: Disk failure on %s, disabling device."
- " Operation continuing on %d devices\n",
+ "raid5: Disk failure on %s, disabling device.\n"
+ "raid5: Operation continuing on %d devices.\n",
bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}
}
@@ -1720,6 +1722,9 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
locked++;
}
}
+ if (locked + 1 == disks)
+ if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
+ atomic_inc(&sh->raid_conf->pending_full_writes);
} else {
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
@@ -1759,7 +1764,7 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
locked++;
pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
- __FUNCTION__, (unsigned long long)sh->sector,
+ __func__, (unsigned long long)sh->sector,
locked, sh->ops.pending);
return locked;
@@ -1947,6 +1952,9 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
STRIPE_SECTORS, 0, 0);
}
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
}
/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
@@ -2149,6 +2157,10 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
0);
}
}
+
+ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+ if (atomic_dec_and_test(&conf->pending_full_writes))
+ md_wakeup_thread(conf->mddev->thread);
}
static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
@@ -2333,6 +2345,9 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
s->locked++;
set_bit(R5_Wantwrite, &sh->dev[i].flags);
}
+ if (s->locked == disks)
+ if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
+ atomic_inc(&conf->pending_full_writes);
/* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
set_bit(STRIPE_INSYNC, &sh->state);
@@ -3094,6 +3109,8 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
else
continue;
+ set_bit(STRIPE_IO_STARTED, &sh->state);
+
bi = &sh->dev[i].req;
bi->bi_rw = rw;
@@ -3164,7 +3181,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
clear_bit(STRIPE_DELAYED, &sh->state);
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
- list_add_tail(&sh->lru, &conf->handle_list);
+ list_add_tail(&sh->lru, &conf->hold_list);
}
} else
blk_plug_device(conf->mddev->queue);
@@ -3442,6 +3459,58 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
}
}
+/* __get_priority_stripe - get the next stripe to process
+ *
+ * Full stripe writes are allowed to pass preread active stripes up until
+ * the bypass_threshold is exceeded. In general the bypass_count
+ * increments when the handle_list is handled before the hold_list; however, it
+ * will not be incremented when STRIPE_IO_STARTED is found set, signifying a
+ * stripe with in-flight i/o. The bypass_count will be reset when the
+ * head of the hold_list has changed, i.e. the head was promoted to the
+ * handle_list.
+ */
+static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
+{
+ struct stripe_head *sh;
+
+ pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
+ __func__,
+ list_empty(&conf->handle_list) ? "empty" : "busy",
+ list_empty(&conf->hold_list) ? "empty" : "busy",
+ atomic_read(&conf->pending_full_writes), conf->bypass_count);
+
+ if (!list_empty(&conf->handle_list)) {
+ sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+
+ if (list_empty(&conf->hold_list))
+ conf->bypass_count = 0;
+ else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
+ if (conf->hold_list.next == conf->last_hold)
+ conf->bypass_count++;
+ else {
+ conf->last_hold = conf->hold_list.next;
+ conf->bypass_count -= conf->bypass_threshold;
+ if (conf->bypass_count < 0)
+ conf->bypass_count = 0;
+ }
+ }
+ } else if (!list_empty(&conf->hold_list) &&
+ ((conf->bypass_threshold &&
+ conf->bypass_count > conf->bypass_threshold) ||
+ atomic_read(&conf->pending_full_writes) == 0)) {
+ sh = list_entry(conf->hold_list.next,
+ typeof(*sh), lru);
+ conf->bypass_count -= conf->bypass_threshold;
+ if (conf->bypass_count < 0)
+ conf->bypass_count = 0;
+ } else
+ return NULL;
+
+ list_del_init(&sh->lru);
+ atomic_inc(&sh->count);
+ BUG_ON(atomic_read(&sh->count) != 1);
+ return sh;
+}
static int make_request(struct request_queue *q, struct bio * bi)
{
@@ -3914,7 +3983,6 @@ static void raid5d(mddev_t *mddev)
handled = 0;
spin_lock_irq(&conf->device_lock);
while (1) {
- struct list_head *first;
struct bio *bio;
if (conf->seq_flush != conf->seq_write) {
@@ -3936,17 +4004,12 @@ static void raid5d(mddev_t *mddev)
handled++;
}
- if (list_empty(&conf->handle_list)) {
+ sh = __get_priority_stripe(conf);
+
+ if (!sh) {
async_tx_issue_pending_all();
break;
}
-
- first = conf->handle_list.next;
- sh = list_entry(first, struct stripe_head, lru);
-
- list_del_init(first);
- atomic_inc(&sh->count);
- BUG_ON(atomic_read(&sh->count)!= 1);
spin_unlock_irq(&conf->device_lock);
handled++;
@@ -3978,15 +4041,13 @@ static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
- char *end;
- int new;
+ unsigned long new;
if (len >= PAGE_SIZE)
return -EINVAL;
if (!conf)
return -ENODEV;
- new = simple_strtoul(page, &end, 10);
- if (!*page || (*end && *end != '\n') )
+ if (strict_strtoul(page, 10, &new))
return -EINVAL;
if (new <= 16 || new > 32768)
return -EINVAL;
@@ -4011,6 +4072,40 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
raid5_store_stripe_cache_size);
static ssize_t
+raid5_show_preread_threshold(mddev_t *mddev, char *page)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ if (conf)
+ return sprintf(page, "%d\n", conf->bypass_threshold);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ unsigned long new;
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (strict_strtoul(page, 10, &new))
+ return -EINVAL;
+ if (new > conf->max_nr_stripes)
+ return -EINVAL;
+ conf->bypass_threshold = new;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
+ S_IRUGO | S_IWUSR,
+ raid5_show_preread_threshold,
+ raid5_store_preread_threshold);
+
+static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -4026,6 +4121,7 @@ raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
static struct attribute *raid5_attrs[] = {
&raid5_stripecache_size.attr,
&raid5_stripecache_active.attr,
+ &raid5_preread_bypass_threshold.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -4130,12 +4226,14 @@ static int run(mddev_t *mddev)
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
+ INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
INIT_LIST_HEAD(&conf->inactive_list);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
+ conf->bypass_threshold = BYPASS_THRESHOLD;
pr_debug("raid5: run(%s) called.\n", mdname(mddev));
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 77a6e4bf503d..21987e3dbe6c 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -121,7 +121,8 @@ int __init raid6_select_algo(void)
j0 = jiffies;
while ( (j1 = jiffies) == j0 )
cpu_relax();
- while ( (jiffies-j1) < (1 << RAID6_TIME_JIFFIES_LG2) ) {
+ while (time_before(jiffies,
+ j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
(*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
perf++;
}
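
raid6_select_algo's timing loop now uses time_before() rather than an open-coded jiffies subtraction. The canonical macros wrap the signed-difference trick, so the comparison stays correct as the unsigned jiffies counter wraps, and it reads as an ordering rather than an interval check. Simplified definitions (typecheck() dropped from the real include/linux/jiffies.h versions) plus a wrap-point example:

	#include <stdio.h>

	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before(a, b)	time_after(b, a)

	int main(void)
	{
		unsigned long j1 = (unsigned long)-16;	/* just before wrap */
		unsigned long deadline = j1 + 32;	/* wraps to 16 */

		printf("deadline wrapped to %lu\n", deadline);
		/* naive "j1 < deadline" is false here; the signed
		 * difference still orders them correctly: */
		printf("time_before(j1, deadline) = %d\n",
		       time_before(j1, deadline));	/* prints 1 */
		return 0;
	}
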
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index d545c98dd5e7..01ea99c9bc1a 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -13,7 +13,7 @@
/*
* TODO:
* - remove "mark pages reserved-hacks" from memory allocation code
- * and implement nopage()
+ * and implement fault()
* - check decimation, calculating and reporting image size when
* using decimation
* - implement read(), user mode buffers and overlay (?)
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 13bac53db69a..6e655b4c6682 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -22,6 +22,7 @@
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
+#include <linux/serial_8250.h>
#include <asm/io.h>
@@ -723,13 +724,14 @@ static void sm501_device_release(struct device *dev)
*/
static struct platform_device *
-sm501_create_subdev(struct sm501_devdata *sm,
- char *name, unsigned int res_count)
+sm501_create_subdev(struct sm501_devdata *sm, char *name,
+ unsigned int res_count, unsigned int platform_data_size)
{
struct sm501_device *smdev;
smdev = kzalloc(sizeof(struct sm501_device) +
- sizeof(struct resource) * res_count, GFP_KERNEL);
+ (sizeof(struct resource) * res_count) +
+ platform_data_size, GFP_KERNEL);
if (!smdev)
return NULL;
@@ -737,11 +739,15 @@ sm501_create_subdev(struct sm501_devdata *sm,
smdev->pdev.name = name;
smdev->pdev.id = sm->pdev_id;
- smdev->pdev.resource = (struct resource *)(smdev+1);
- smdev->pdev.num_resources = res_count;
-
smdev->pdev.dev.parent = sm->dev;
+ if (res_count) {
+ smdev->pdev.resource = (struct resource *)(smdev+1);
+ smdev->pdev.num_resources = res_count;
+ }
+ if (platform_data_size)
+ smdev->pdev.dev.platform_data = (void *)(smdev+1);
+
return &smdev->pdev;
}
@@ -829,7 +835,7 @@ static int sm501_register_usbhost(struct sm501_devdata *sm,
{
struct platform_device *pdev;
- pdev = sm501_create_subdev(sm, "sm501-usb", 3);
+ pdev = sm501_create_subdev(sm, "sm501-usb", 3, 0);
if (!pdev)
return -ENOMEM;
@@ -840,12 +846,55 @@ static int sm501_register_usbhost(struct sm501_devdata *sm,
return sm501_register_device(sm, pdev);
}
+static void sm501_setup_uart_data(struct sm501_devdata *sm,
+ struct plat_serial8250_port *uart_data,
+ unsigned int offset)
+{
+ uart_data->membase = sm->regs + offset;
+ uart_data->mapbase = sm->io_res->start + offset;
+ uart_data->iotype = UPIO_MEM;
+ uart_data->irq = sm->irq;
+ uart_data->flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+ uart_data->regshift = 2;
+ uart_data->uartclk = (9600 * 16);
+}
+
+static int sm501_register_uart(struct sm501_devdata *sm, int devices)
+{
+ struct platform_device *pdev;
+ struct plat_serial8250_port *uart_data;
+
+ pdev = sm501_create_subdev(sm, "serial8250", 0,
+ sizeof(struct plat_serial8250_port) * 3);
+ if (!pdev)
+ return -ENOMEM;
+
+ uart_data = pdev->dev.platform_data;
+
+ if (devices & SM501_USE_UART0) {
+ sm501_setup_uart_data(sm, uart_data++, 0x30000);
+ sm501_unit_power(sm->dev, SM501_GATE_UART0, 1);
+ sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 12, 0);
+ sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x01e0, 0);
+ }
+ if (devices & SM501_USE_UART1) {
+ sm501_setup_uart_data(sm, uart_data++, 0x30020);
+ sm501_unit_power(sm->dev, SM501_GATE_UART1, 1);
+ sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 13, 0);
+ sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x1e00, 0);
+ }
+
+ pdev->id = PLAT8250_DEV_SM501;
+
+ return sm501_register_device(sm, pdev);
+}
+
static int sm501_register_display(struct sm501_devdata *sm,
resource_size_t *mem_avail)
{
struct platform_device *pdev;
- pdev = sm501_create_subdev(sm, "sm501-fb", 4);
+ pdev = sm501_create_subdev(sm, "sm501-fb", 4, 0);
if (!pdev)
return -ENOMEM;
@@ -963,6 +1012,7 @@ static unsigned int sm501_mem_local[] = {
static int sm501_init_dev(struct sm501_devdata *sm)
{
+ struct sm501_initdata *idata;
resource_size_t mem_avail;
unsigned long dramctrl;
unsigned long devid;
@@ -980,6 +1030,9 @@ static int sm501_init_dev(struct sm501_devdata *sm)
return -EINVAL;
}
+ /* disable irqs */
+ writel(0, sm->regs + SM501_IRQ_MASK);
+
dramctrl = readl(sm->regs + SM501_DRAM_CONTROL);
mem_avail = sm501_mem_local[(dramctrl >> 13) & 0x7];
@@ -998,15 +1051,14 @@ static int sm501_init_dev(struct sm501_devdata *sm)
/* check to see if we have some device initialisation */
- if (sm->platdata) {
- struct sm501_platdata *pdata = sm->platdata;
+ idata = sm->platdata ? sm->platdata->init : NULL;
+ if (idata) {
+ sm501_init_regs(sm, idata);
- if (pdata->init) {
- sm501_init_regs(sm, sm->platdata->init);
-
- if (pdata->init->devices & SM501_USE_USB_HOST)
- sm501_register_usbhost(sm, &mem_avail);
- }
+ if (idata->devices & SM501_USE_USB_HOST)
+ sm501_register_usbhost(sm, &mem_avail);
+ if (idata->devices & (SM501_USE_UART0 | SM501_USE_UART1))
+ sm501_register_uart(sm, idata->devices);
}
ret = sm501_check_clocks(sm);
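
sm501_create_subdev() above grows its single kzalloc() to carry the subdevice, an optional resource table, and now optional platform data, with the trailing storage addressed as (smdev + 1). Note that both tails start at the same address, so callers ask for resources or platform data but not both, as the usb (3, 0) and serial8250 (0, sizeof(...) * 3) calls show. A userspace model of the layout:

	#include <stdlib.h>

	struct resource { unsigned long start, end; };
	struct pdev {
		struct resource *resource;
		unsigned int num_resources;
		void *platform_data;
	};
	struct subdev { struct pdev pdev; };	/* header of one allocation */

	static struct subdev *create_subdev(unsigned int res_count, size_t pd_size)
	{
		/* one block: header, then trailing storage */
		struct subdev *s = calloc(1, sizeof(*s) +
					  sizeof(struct resource) * res_count +
					  pd_size);
		if (!s)
			return NULL;
		if (res_count) {
			s->pdev.resource = (struct resource *)(s + 1);
			s->pdev.num_resources = res_count;
		}
		if (pd_size)
			s->pdev.platform_data = (void *)(s + 1);
		return s;
	}

	int main(void)
	{
		struct subdev *s = create_subdev(0, 64);
		free(s);	/* one free releases everything */
		return 0;
	}
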
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 5e859486eaf8..ad34e2d22524 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -204,8 +204,7 @@ static inline int ucb1x00_ts_pen_down(struct ucb1x00_ts *ts)
static int ucb1x00_thread(void *_ts)
{
struct ucb1x00_ts *ts = _ts;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
+ DECLARE_WAITQUEUE(wait, current);
int valid = 0;
set_freezable();
@@ -234,7 +233,7 @@ static int ucb1x00_thread(void *_ts)
if (ucb1x00_ts_pen_down(ts)) {
- set_task_state(tsk, TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
ucb1x00_disable(ts->ucb);
@@ -262,7 +261,7 @@ static int ucb1x00_thread(void *_ts)
valid = 1;
}
- set_task_state(tsk, TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
timeout = HZ / 100;
}
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index fafb57fed761..0736cff9d97a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -31,7 +31,6 @@
static LIST_HEAD(container_list);
static DEFINE_MUTEX(container_list_lock);
static struct class enclosure_class;
-static struct class enclosure_component_class;
/**
* enclosure_find - find an enclosure given a device
@@ -166,6 +165,40 @@ void enclosure_unregister(struct enclosure_device *edev)
}
EXPORT_SYMBOL_GPL(enclosure_unregister);
+#define ENCLOSURE_NAME_SIZE 64
+
+static void enclosure_link_name(struct enclosure_component *cdev, char *name)
+{
+ strcpy(name, "enclosure_device:");
+ strcat(name, cdev->cdev.bus_id);
+}
+
+static void enclosure_remove_links(struct enclosure_component *cdev)
+{
+ char name[ENCLOSURE_NAME_SIZE];
+
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+}
+
+static int enclosure_add_links(struct enclosure_component *cdev)
+{
+ int error;
+ char name[ENCLOSURE_NAME_SIZE];
+
+ error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
+ if (error)
+ return error;
+
+ enclosure_link_name(cdev, name);
+ error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
+ if (error)
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+
+ return error;
+}
+
static void enclosure_release(struct device *cdev)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
@@ -178,10 +211,15 @@ static void enclosure_component_release(struct device *dev)
{
struct enclosure_component *cdev = to_enclosure_component(dev);
- put_device(cdev->dev);
+ if (cdev->dev) {
+ enclosure_remove_links(cdev);
+ put_device(cdev->dev);
+ }
put_device(dev->parent);
}
+static struct attribute_group *enclosure_groups[];
+
/**
* enclosure_component_register - add a particular component to an enclosure
* @edev: the enclosure to add the component
@@ -217,12 +255,14 @@ enclosure_component_register(struct enclosure_device *edev,
ecomp->number = number;
cdev = &ecomp->cdev;
cdev->parent = get_device(&edev->edev);
- cdev->class = &enclosure_component_class;
if (name)
snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
else
snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
+ cdev->release = enclosure_component_release;
+ cdev->groups = enclosure_groups;
+
err = device_register(cdev);
if (err)
ERR_PTR(err);
@@ -255,10 +295,12 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
cdev = &edev->component[component];
- device_del(&cdev->cdev);
+ if (cdev->dev)
+ enclosure_remove_links(cdev);
+
put_device(cdev->dev);
cdev->dev = get_device(dev);
- return device_add(&cdev->cdev);
+ return enclosure_add_links(cdev);
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
@@ -442,24 +484,32 @@ static ssize_t get_component_type(struct device *cdev,
}
-static struct device_attribute enclosure_component_attrs[] = {
- __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
- set_component_fault),
- __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
- set_component_status),
- __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
- set_component_active),
- __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
- set_component_locate),
- __ATTR(type, S_IRUGO, get_component_type, NULL),
- __ATTR_NULL
+static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
+ set_component_fault);
+static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
+ set_component_status);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
+ set_component_active);
+static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
+ set_component_locate);
+static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+
+static struct attribute *enclosure_component_attrs[] = {
+ &dev_attr_fault.attr,
+ &dev_attr_status.attr,
+ &dev_attr_active.attr,
+ &dev_attr_locate.attr,
+ &dev_attr_type.attr,
+ NULL
};
-static struct class enclosure_component_class = {
- .name = "enclosure_component",
- .owner = THIS_MODULE,
- .dev_attrs = enclosure_component_attrs,
- .dev_release = enclosure_component_release,
+static struct attribute_group enclosure_group = {
+ .attrs = enclosure_component_attrs,
+};
+
+static struct attribute_group *enclosure_groups[] = {
+ &enclosure_group,
+ NULL
};
static int __init enclosure_init(void)
@@ -469,20 +519,12 @@ static int __init enclosure_init(void)
err = class_register(&enclosure_class);
if (err)
return err;
- err = class_register(&enclosure_component_class);
- if (err)
- goto err_out;
return 0;
- err_out:
- class_unregister(&enclosure_class);
-
- return err;
}
static void __exit enclosure_exit(void)
{
- class_unregister(&enclosure_component_class);
class_unregister(&enclosure_class);
}
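
The enclosure rework above retires the separate enclosure_component class: components become plain devices whose sysfs attributes arrive through a static attribute_group wired into dev->groups, so device_register() creates and removes the files together with the device. The skeleton of that pattern (show_status/store_status are hypothetical callbacks):

	#include <linux/device.h>

	static ssize_t show_status(struct device *dev,
				   struct device_attribute *attr, char *buf);
	static ssize_t store_status(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);

	static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, show_status, store_status);

	static struct attribute *component_attrs[] = {
		&dev_attr_status.attr,
		NULL
	};

	static struct attribute_group component_group = {
		.attrs = component_attrs,
	};

	static struct attribute_group *component_groups[] = {
		&component_group,
		NULL
	};

	/* at registration time: */
	/*	cdev->groups = component_groups;	*/
	/*	err = device_register(cdev);		*/
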
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0697aa8ea774..8082c1d142df 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && (!SPARC32 || BROKEN)
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 75ef9d0d974d..f9d6b4dca180 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+ struct mlx4_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+ struct mlx4_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+ if (i < MLX4_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db = pgdir->db_page + db->index;
+ db->dma = pgdir->db_dma + db->index * 4;
+ db->order = order;
+
+ return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &priv->pgdir_list, list)
+ if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &priv->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&priv->pgdir_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int o;
+ int i;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ db->u.pgdir->db_page, db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct)
+{
+ int err;
+
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
+ if (err)
+ return err;
+
+ *wqres->db.db = 0;
+
+ err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+ if (err)
+ goto err_db;
+
+ err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+ &wqres->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+ mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+ mlx4_db_free(dev, &wqres->db);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size)
+{
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+ mlx4_buf_free(dev, size, &wqres->buf);
+ mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
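
mlx4's new doorbell allocator packs 4-byte doorbell records into DMA-coherent pages and tracks them with a tiny two-order buddy system: bits[1] marks free aligned pairs, bits[0] marks free single records, and i ^ 1 addresses a record's buddy. A worked trace against an empty page (assuming 4 KB pages, so MLX4_DB_PER_PAGE is 1024):

	/*
	 * mlx4_db_alloc(order = 0) on a fresh pgdir:
	 *   bits[0] is empty, so fall through to o = 1;
	 *   find_first_bit(bits[1]) = 0, clear_bit(0, bits[1]);
	 *   i = 0 << 1 = 0; since o > order, set_bit(0 ^ 1, bits[0]):
	 *   record 0 is handed out, its buddy record 1 becomes order-0 free.
	 *
	 * mlx4_db_free(index = 0, order = 0):
	 *   test_bit(0 ^ 1, bits[0]) is set, so clear it and promote o to 1;
	 *   i >>= 1 = 0; set_bit(0, bits[1]): the pair is whole again.
	 */

mlx4_db_free() thus reverses the split, and once bits[1] reports the page fully free the whole page goes back to the coherent DMA pool.
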
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index caa5bcf54e35..6fda0af9d0a6 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
- err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+ err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 49a4acab5e82..a6aa49fc1d68 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ mutex_init(&priv->pgdir_mutex);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 73336810e652..a4023c2dd050 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -257,6 +257,9 @@ struct mlx4_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
struct mlx4_fw fw;
struct mlx4_cmd cmd;
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index fa24e6597591..ee5484c44a18 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
+{
+ int err;
+ int i;
+ enum mlx4_qp_state states[] = {
+ MLX4_QP_STATE_RST,
+ MLX4_QP_STATE_INIT,
+ MLX4_QP_STATE_RTR,
+ MLX4_QP_STATE_RTS
+ };
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ context->flags &= cpu_to_be32(~(0xf << 28));
+ context->flags |= cpu_to_be32(states[i + 1] << 28);
+ err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
+ context, 0, 0, qp);
+ if (err) {
+ mlx4_err(dev, "Failed to bring QP to state: "
+ "%d with error: %d\n",
+ states[i + 1], err);
+ return err;
+ }
+
+ *qp_state = states[i + 1];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
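
mlx4_qp_to_ready() walks a queue pair through the full RST -> INIT -> RTR -> RTS ladder, rewriting the state field in bits 31:28 of context->flags before each transition. A hypothetical caller sketch, assuming the context has already been populated for the device:

	struct mlx4_qp_context context;
	enum mlx4_qp_state state;
	int err;

	memset(&context, 0, sizeof(context));
	/* ... MTU, keys, and ring addresses filled in as the HCA requires ... */

	err = mlx4_qp_to_ready(dev, &wqres->mtt, &context, &qp, &state);
	if (err)
		return err;
	/* on success, state == MLX4_QP_STATE_RTS */
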
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c2642bc1d49b..2c343aae38d4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -56,7 +56,7 @@ obj-$(CONFIG_RTL8187) += rtl8187.o
obj-$(CONFIG_ADM8211) += adm8211.o
-obj-$(CONFIG_IWLCORE) += iwlwifi/
+obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index c4e631d14bfe..9a25f550fd16 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,11 @@
+config IWLWIFI
+ bool
+ default n
+
config IWLCORE
tristate "Intel Wireless Wifi Core"
depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
+ select IWLWIFI
config IWLWIFI_LEDS
bool
@@ -106,6 +111,7 @@ config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
select FW_LOADER
+ select IWLWIFI
---help---
Select to build the driver supporting the:
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b07ba2a14119..9304c4555079 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -491,7 +491,7 @@ typedef enum {
*/
void sync_buffer(int cpu)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
+ struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
struct mm_struct *mm = NULL;
struct task_struct * new;
unsigned long cookie = 0;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index c93d3d2640ab..efcbf4b4579f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -27,7 +27,7 @@
#include "buffer_sync.h"
#include "oprof.h"
-struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct oprofile_cpu_buffer, cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
int i;
for_each_online_cpu(i)
- vfree(cpu_buffer[i].buffer);
+ vfree(per_cpu(cpu_buffer, i).buffer);
}
int alloc_cpu_buffers(void)
@@ -49,7 +49,7 @@ int alloc_cpu_buffers(void)
unsigned long buffer_size = fs_cpu_buffer_size;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
cpu_to_node(i));
@@ -83,7 +83,7 @@ void start_cpu_work(void)
work_enabled = 1;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they dont all
@@ -100,7 +100,7 @@ void end_cpu_work(void)
work_enabled = 0;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
cancel_delayed_work(&b->work);
}
@@ -227,7 +227,7 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!backtrace_depth) {
log_sample(cpu_buf, pc, is_kernel, event);
@@ -254,13 +254,13 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
log_sample(cpu_buf, pc, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
if (!cpu_buf->tracing)
return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index c66c025abe75..13588174311d 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
+#include <linux/sched.h>
struct task_struct;
@@ -47,7 +48,7 @@ struct oprofile_cpu_buffer {
struct delayed_work work;
} ____cacheline_aligned;
-extern struct oprofile_cpu_buffer cpu_buffer[];
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index d1f6d776e9e4..f99b28e7b79a 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
int i;
for_each_possible_cpu(i) {
- cpu_buf = &cpu_buffer[i];
+ cpu_buf = &per_cpu(cpu_buffer, i);
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
cpu_buf->backtrace_aborted = 0;
@@ -49,7 +49,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
return;
for_each_possible_cpu(i) {
- cpu_buf = &cpu_buffer[i];
+ cpu_buf = &per_cpu(cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
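
The oprofile conversion above replaces a static cpu_buffer[NR_CPUS] array with a true per-CPU variable: DEFINE_PER_CPU_SHARED_ALIGNED gives every possible CPU its own cacheline-aligned instance, per_cpu(var, cpu) names another CPU's copy, and __get_cpu_var(var) the current CPU's. The same pattern in miniature (kernel-style sketch; my_stats is illustrative):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct stats { unsigned long hits; };

	static DEFINE_PER_CPU(struct stats, my_stats);

	static void record_hit(void)
	{
		/* current CPU's copy; caller must keep preemption off */
		__get_cpu_var(my_stats).hits++;
	}

	static unsigned long total_hits(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(my_stats, cpu).hits;
		return sum;
	}
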
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 12a1645a2e43..e85cbf116db1 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -167,7 +167,7 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
return error;
}
- if (pnp_dev->protocol && pnp_dev->protocol->suspend)
+ if (pnp_dev->protocol->suspend)
pnp_dev->protocol->suspend(pnp_dev, state);
return 0;
}
@@ -181,7 +181,7 @@ static int pnp_bus_resume(struct device *dev)
if (!pnp_drv)
return 0;
- if (pnp_dev->protocol && pnp_dev->protocol->resume)
+ if (pnp_dev->protocol->resume)
pnp_dev->protocol->resume(pnp_dev);
if (pnp_can_write(pnp_dev)) {
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 37993206ae5d..e4daf4635c48 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -49,8 +49,11 @@ static void quirk_awe32_resources(struct pnp_dev *dev)
port2->max += 0x400;
port3->min += 0x800;
port3->max += 0x800;
+ dev_info(&dev->dev,
+ "AWE32 quirk - added ioports 0x%lx and 0x%lx\n",
+ (unsigned long)port2->min,
+ (unsigned long)port3->min);
}
- printk(KERN_INFO "pnp: AWE32 quirk - adding two ports\n");
}
static void quirk_cmi8330_resources(struct pnp_dev *dev)
@@ -73,7 +76,8 @@ static void quirk_cmi8330_resources(struct pnp_dev *dev)
IORESOURCE_DMA_8BIT)
dma->map = 0x000A;
}
- printk(KERN_INFO "pnp: CMI8330 quirk - fixing interrupts and dma\n");
+ dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 "
+ "and DMA channels to 1, 3\n");
}
static void quirk_sb16audio_resources(struct pnp_dev *dev)
@@ -104,8 +108,7 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
changed = 1;
}
if (changed)
- printk(KERN_INFO
- "pnp: SB audio device quirk - increasing port range\n");
+ dev_info(&dev->dev, "SB audio device quirk - increased port range\n");
}
@@ -214,8 +217,8 @@ void pnp_fixup_device(struct pnp_dev *dev)
quirk = pnp_fixups[i].quirk_function;
#ifdef DEBUG
- dev_dbg(&dev->dev, "calling quirk 0x%p", quirk);
- print_fn_descriptor_symbol(": %s()\n",
+ dev_dbg(&dev->dev, "calling ");
+ print_fn_descriptor_symbol("%s()\n",
(unsigned long) *quirk);
#endif
(*quirk)(dev);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 02a4c8cf2b2d..6cc2c0330230 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,10 +20,6 @@ menuconfig RTC_CLASS
if RTC_CLASS
-if GEN_RTC || RTC
-comment "Conflicting RTC option has been selected, check GEN_RTC and RTC"
-endif
-
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
depends on RTC_CLASS = y
@@ -304,6 +300,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS
+ default y if X86
help
Say "yes" here to get direct support for the real time clock
found in every PC or ACPI-based system, and some other boards.
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 52abffc86bcd..39e64ab1ecb7 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -83,7 +83,7 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = tm->tm_year - 1900;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+ pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -97,7 +97,7 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
{
unsigned long cr;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+ pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -142,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+ pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -178,7 +178,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alrm->enabled)
at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
- pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+ pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
@@ -193,7 +193,7 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
{
int ret = 0;
- pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __FUNCTION__, cmd, arg);
+ pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
switch (cmd) {
case RTC_AIE_OFF: /* alarm off */
@@ -265,7 +265,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
rtc_update_irq(rtc, 1, events);
- pr_debug("%s(): num=%ld, events=0x%02lx\n", __FUNCTION__,
+ pr_debug("%s(): num=%ld, events=0x%02lx\n", __func__,
events >> 8, events & 0x000000FF);
return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 56728a2a3385..38d8742a4bdf 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -288,7 +288,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
rtc_update_irq(rtc->rtcdev, 1, events);
- pr_debug("%s: num=%ld, events=0x%02lx\n", __FUNCTION__,
+ pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
events >> 8, events & 0x000000FF);
return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 7b002ceeaa7d..b9397818f73a 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -122,7 +122,7 @@ static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index d08912f18ddd..a83a40b3ebaa 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -181,8 +181,7 @@ ds1511_wdog_disable(void)
* stupidly, some callers call with year unmolested;
* and some call with year = year - 1900. thanks.
*/
- int
-ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
+static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
u8 mon, day, dow, hrs, min, sec, yrs, cen;
unsigned int flags;
@@ -245,8 +244,7 @@ ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
return 0;
}
- int
-ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
{
unsigned int century;
unsigned int flags;
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index e0900ca678ec..6fa4556f5f5c 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -50,13 +50,13 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
/* read date registers */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
dev_dbg(&client->dev,
"%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
- __FUNCTION__, buf[0], buf[1], buf[2], buf[3]);
+ __func__, buf[0], buf[1], buf[2], buf[3]);
time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
@@ -64,7 +64,7 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour,
+ __func__, tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
return 0;
@@ -84,7 +84,7 @@ static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
xfer = i2c_master_send(client, buf, 6);
if (xfer != 6) {
- dev_err(&client->dev, "%s: send: %d\n", __FUNCTION__, xfer);
+ dev_err(&client->dev, "%s: send: %d\n", __func__, xfer);
return -EIO;
}
@@ -98,7 +98,7 @@ static int ds1672_set_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev,
"%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -133,7 +133,7 @@ static int ds1672_get_control(struct i2c_client *client, u8 *status)
/* read control register */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
@@ -199,7 +199,7 @@ static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind)
struct i2c_client *client;
struct rtc_device *rtc;
- dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+ dev_dbg(&adapter->dev, "%s\n", __func__);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 725b0c73c333..fb15e3fb4ce2 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -15,16 +15,15 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
-#define DRV_NAME "isl1208"
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
/* Register map */
/* rtc section */
#define ISL1208_REG_SC 0x00
#define ISL1208_REG_MN 0x01
#define ISL1208_REG_HR 0x02
-#define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */
-#define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
+#define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */
+#define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
#define ISL1208_REG_DT 0x03
#define ISL1208_REG_MO 0x04
#define ISL1208_REG_YR 0x05
@@ -33,14 +32,14 @@
/* control/status section */
#define ISL1208_REG_SR 0x07
-#define ISL1208_REG_SR_ARST (1<<7) /* auto reset */
-#define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */
-#define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */
-#define ISL1208_REG_SR_ALM (1<<2) /* alarm */
-#define ISL1208_REG_SR_BAT (1<<1) /* battery */
-#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
+#define ISL1208_REG_SR_ARST (1<<7) /* auto reset */
+#define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */
+#define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */
+#define ISL1208_REG_SR_ALM (1<<2) /* alarm */
+#define ISL1208_REG_SR_BAT (1<<1) /* battery */
+#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
#define ISL1208_REG_INT 0x08
-#define ISL1208_REG_09 0x09 /* reserved */
+#define ISL1208_REG_09 0x09 /* reserved */
#define ISL1208_REG_ATR 0x0a
#define ISL1208_REG_DTR 0x0b
@@ -58,39 +57,21 @@
#define ISL1208_REG_USR2 0x13
#define ISL1208_USR_SECTION_LEN 2
-/* i2c configuration */
-#define ISL1208_I2C_ADDR 0xde
-
-static const unsigned short normal_i2c[] = {
- ISL1208_I2C_ADDR>>1, I2C_CLIENT_END
-};
-I2C_CLIENT_INSMOD; /* defines addr_data */
-
-static int isl1208_attach_adapter(struct i2c_adapter *adapter);
-static int isl1208_detach_client(struct i2c_client *client);
-
-static struct i2c_driver isl1208_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .id = I2C_DRIVERID_ISL1208,
- .attach_adapter = &isl1208_attach_adapter,
- .detach_client = &isl1208_detach_client,
-};
+static struct i2c_driver isl1208_driver;
/* block read */
static int
isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
- unsigned len)
+ unsigned len)
{
u8 reg_addr[1] = { reg };
struct i2c_msg msgs[2] = {
- { client->addr, client->flags, sizeof(reg_addr), reg_addr },
- { client->addr, client->flags | I2C_M_RD, len, buf }
+ {client->addr, 0, sizeof(reg_addr), reg_addr},
+ {client->addr, I2C_M_RD, len, buf}
};
int ret;
- BUG_ON(len == 0);
BUG_ON(reg > ISL1208_REG_USR2);
BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
@@ -103,15 +84,14 @@ isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
/* block write */
static int
isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
- unsigned len)
+ unsigned len)
{
u8 i2c_buf[ISL1208_REG_USR2 + 2];
struct i2c_msg msgs[1] = {
- { client->addr, client->flags, len + 1, i2c_buf }
+ {client->addr, 0, len + 1, i2c_buf}
};
int ret;
- BUG_ON(len == 0);
BUG_ON(reg > ISL1208_REG_USR2);
BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
@@ -125,7 +105,8 @@ isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
}
/* simple check to see whether we have an isl1208 */
-static int isl1208_i2c_validate_client(struct i2c_client *client)
+static int
+isl1208_i2c_validate_client(struct i2c_client *client)
{
u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
u8 zero_mask[ISL1208_RTC_SECTION_LEN] = {
@@ -139,24 +120,29 @@ static int isl1208_i2c_validate_client(struct i2c_client *client)
return ret;
for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) {
- if (regs[i] & zero_mask[i]) /* check if bits are cleared */
+ if (regs[i] & zero_mask[i]) /* check if bits are cleared */
return -ENODEV;
}
return 0;
}
-static int isl1208_i2c_get_sr(struct i2c_client *client)
+static int
+isl1208_i2c_get_sr(struct i2c_client *client)
{
- return i2c_smbus_read_byte_data(client, ISL1208_REG_SR) == -1 ? -EIO:0;
+ int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
+ if (sr < 0)
+ return -EIO;
+
+ return sr;
}
-static int isl1208_i2c_get_atr(struct i2c_client *client)
+static int
+isl1208_i2c_get_atr(struct i2c_client *client)
{
int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR);
-
if (atr < 0)
- return -EIO;
+ return atr;
/* The 6bit value in the ATR register controls the load
* capacitance C_load * in steps of 0.25pF
@@ -169,51 +155,54 @@ static int isl1208_i2c_get_atr(struct i2c_client *client)
*
*/
- atr &= 0x3f; /* mask out lsb */
- atr ^= 1<<5; /* invert 6th bit */
- atr += 2*9; /* add offset of 4.5pF; unit[atr] = 0.25pF */
+ atr &= 0x3f; /* mask out lsb */
+ atr ^= 1 << 5; /* invert 6th bit */
+ atr += 2 * 9; /* add offset of 4.5pF; unit[atr] = 0.25pF */
return atr;
}
-static int isl1208_i2c_get_dtr(struct i2c_client *client)
+static int
+isl1208_i2c_get_dtr(struct i2c_client *client)
{
int dtr = i2c_smbus_read_byte_data(client, ISL1208_REG_DTR);
-
if (dtr < 0)
return -EIO;
/* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */
- dtr = ((dtr & 0x3) * 20) * (dtr & (1<<2) ? -1 : 1);
+ dtr = ((dtr & 0x3) * 20) * (dtr & (1 << 2) ? -1 : 1);
return dtr;
}
-static int isl1208_i2c_get_usr(struct i2c_client *client)
+static int
+isl1208_i2c_get_usr(struct i2c_client *client)
{
u8 buf[ISL1208_USR_SECTION_LEN] = { 0, };
int ret;
- ret = isl1208_i2c_read_regs (client, ISL1208_REG_USR1, buf,
- ISL1208_USR_SECTION_LEN);
+ ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1, buf,
+ ISL1208_USR_SECTION_LEN);
if (ret < 0)
return ret;
return (buf[1] << 8) | buf[0];
}
-static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
+static int
+isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
{
u8 buf[ISL1208_USR_SECTION_LEN];
buf[0] = usr & 0xff;
buf[1] = (usr >> 8) & 0xff;
- return isl1208_i2c_set_regs (client, ISL1208_REG_USR1, buf,
- ISL1208_USR_SECTION_LEN);
+ return isl1208_i2c_set_regs(client, ISL1208_REG_USR1, buf,
+ ISL1208_USR_SECTION_LEN);
}
-static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
+static int
+isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct i2c_client *const client = to_i2c_client(dev);
int sr, dtr, atr, usr;
@@ -230,20 +219,19 @@ static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
(sr & ISL1208_REG_SR_ALM) ? " ALM" : "",
(sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "",
(sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "",
- (sr & ISL1208_REG_SR_ARST) ? " ARST" : "",
- sr);
+ (sr & ISL1208_REG_SR_ARST) ? " ARST" : "", sr);
seq_printf(seq, "batt_status\t: %s\n",
(sr & ISL1208_REG_SR_RTCF) ? "bad" : "okay");
dtr = isl1208_i2c_get_dtr(client);
- if (dtr >= 0 -1)
+ if (dtr >= 0 - 1)
seq_printf(seq, "digital_trim\t: %d ppm\n", dtr);
atr = isl1208_i2c_get_atr(client);
if (atr >= 0)
seq_printf(seq, "analog_trim\t: %d.%.2d pF\n",
- atr>>2, (atr&0x3)*25);
+ atr >> 2, (atr & 0x3) * 25);
usr = isl1208_i2c_get_usr(client);
if (usr >= 0)
@@ -252,9 +240,8 @@ static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
return 0;
}
-
-static int isl1208_i2c_read_time(struct i2c_client *client,
- struct rtc_time *tm)
+static int
+isl1208_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
{
int sr;
u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
@@ -274,27 +261,30 @@ static int isl1208_i2c_read_time(struct i2c_client *client,
tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SC]);
tm->tm_min = BCD2BIN(regs[ISL1208_REG_MN]);
- { /* HR field has a more complex interpretation */
+
+ /* HR field has a more complex interpretation */
+ {
const u8 _hr = regs[ISL1208_REG_HR];
- if (_hr & ISL1208_REG_HR_MIL) /* 24h format */
+ if (_hr & ISL1208_REG_HR_MIL) /* 24h format */
tm->tm_hour = BCD2BIN(_hr & 0x3f);
- else { // 12h format
+ else {
+ /* 12h format */
tm->tm_hour = BCD2BIN(_hr & 0x1f);
- if (_hr & ISL1208_REG_HR_PM) /* PM flag set */
+ if (_hr & ISL1208_REG_HR_PM) /* PM flag set */
tm->tm_hour += 12;
}
}
tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DT]);
- tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */
+ tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */
tm->tm_year = BCD2BIN(regs[ISL1208_REG_YR]) + 100;
tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DW]);
return 0;
}
-static int isl1208_i2c_read_alarm(struct i2c_client *client,
- struct rtc_wkalrm *alarm)
+static int
+isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
@@ -307,7 +297,7 @@ static int isl1208_i2c_read_alarm(struct i2c_client *client,
}
sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs,
- ISL1208_ALARM_SECTION_LEN);
+ ISL1208_ALARM_SECTION_LEN);
if (sr < 0) {
dev_err(&client->dev, "%s: reading alarm section failed\n",
__func__);
@@ -315,23 +305,25 @@ static int isl1208_i2c_read_alarm(struct i2c_client *client,
}
/* MSB of each alarm register is an enable bit */
- tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA-ISL1208_REG_SCA] & 0x7f);
- tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA-ISL1208_REG_SCA] & 0x7f);
- tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA-ISL1208_REG_SCA] & 0x3f);
- tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA-ISL1208_REG_SCA] & 0x3f);
- tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MOA-ISL1208_REG_SCA] & 0x1f)-1;
- tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA-ISL1208_REG_SCA] & 0x03);
+ tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f);
+ tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f);
+ tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f);
+ tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f);
+ tm->tm_mon =
+ BCD2BIN(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
+ tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
return 0;
}
-static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int
+isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return isl1208_i2c_read_time(to_i2c_client(dev), tm);
}
-static int isl1208_i2c_set_time(struct i2c_client *client,
- struct rtc_time const *tm)
+static int
+isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
{
int sr;
u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
@@ -353,7 +345,7 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
}
/* set WRTC */
- sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR,
sr | ISL1208_REG_SR_WRTC);
if (sr < 0) {
dev_err(&client->dev, "%s: writing SR failed\n", __func__);
@@ -369,7 +361,7 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
}
/* clear WRTC again */
- sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR,
sr & ~ISL1208_REG_SR_WRTC);
if (sr < 0) {
dev_err(&client->dev, "%s: writing SR failed\n", __func__);
@@ -380,70 +372,69 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
}
-static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int
+isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return isl1208_i2c_set_time(to_i2c_client(dev), tm);
}
-static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int
+isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
}
static const struct rtc_class_ops isl1208_rtc_ops = {
- .proc = isl1208_rtc_proc,
- .read_time = isl1208_rtc_read_time,
- .set_time = isl1208_rtc_set_time,
- .read_alarm = isl1208_rtc_read_alarm,
- //.set_alarm = isl1208_rtc_set_alarm,
+ .proc = isl1208_rtc_proc,
+ .read_time = isl1208_rtc_read_time,
+ .set_time = isl1208_rtc_set_time,
+ .read_alarm = isl1208_rtc_read_alarm,
+ /*.set_alarm = isl1208_rtc_set_alarm, */
};
/* sysfs interface */
-static ssize_t isl1208_sysfs_show_atrim(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+isl1208_sysfs_show_atrim(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int atr;
-
- atr = isl1208_i2c_get_atr(to_i2c_client(dev));
+ int atr = isl1208_i2c_get_atr(to_i2c_client(dev));
if (atr < 0)
return atr;
- return sprintf(buf, "%d.%.2d pF\n", atr>>2, (atr&0x3)*25);
+ return sprintf(buf, "%d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25);
}
+
static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL);
-static ssize_t isl1208_sysfs_show_dtrim(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+isl1208_sysfs_show_dtrim(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int dtr;
-
- dtr = isl1208_i2c_get_dtr(to_i2c_client(dev));
+ int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev));
if (dtr < 0)
return dtr;
return sprintf(buf, "%d ppm\n", dtr);
}
+
static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL);
-static ssize_t isl1208_sysfs_show_usr(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+isl1208_sysfs_show_usr(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int usr;
-
- usr = isl1208_i2c_get_usr(to_i2c_client(dev));
+ int usr = isl1208_i2c_get_usr(to_i2c_client(dev));
if (usr < 0)
return usr;
return sprintf(buf, "0x%.4x\n", usr);
}
-static ssize_t isl1208_sysfs_store_usr(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t
+isl1208_sysfs_store_usr(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int usr = -1;
@@ -460,124 +451,116 @@ static ssize_t isl1208_sysfs_store_usr(struct device *dev,
return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? -EIO : count;
}
+
static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
isl1208_sysfs_store_usr);
static int
-isl1208_probe(struct i2c_adapter *adapter, int addr, int kind)
+isl1208_sysfs_register(struct device *dev)
{
- int rc = 0;
- struct i2c_client *new_client = NULL;
- struct rtc_device *rtc = NULL;
+ int err;
+
+ err = device_create_file(dev, &dev_attr_atrim);
+ if (err)
+ return err;
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
- rc = -ENODEV;
- goto failout;
+ err = device_create_file(dev, &dev_attr_dtrim);
+ if (err) {
+ device_remove_file(dev, &dev_attr_atrim);
+ return err;
}
- new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
- if (new_client == NULL) {
- rc = -ENOMEM;
- goto failout;
+ err = device_create_file(dev, &dev_attr_usr);
+ if (err) {
+ device_remove_file(dev, &dev_attr_atrim);
+ device_remove_file(dev, &dev_attr_dtrim);
}
- new_client->addr = addr;
- new_client->adapter = adapter;
- new_client->driver = &isl1208_driver;
- new_client->flags = 0;
- strcpy(new_client->name, DRV_NAME);
+ return 0;
+}
- if (kind < 0) {
- rc = isl1208_i2c_validate_client(new_client);
- if (rc < 0)
- goto failout;
- }
+static int
+isl1208_sysfs_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_atrim);
+ device_remove_file(dev, &dev_attr_dtrim);
+ device_remove_file(dev, &dev_attr_usr);
+
+ return 0;
+}
+
+static int
+isl1208_probe(struct i2c_client *client)
+{
+ int rc = 0;
+ struct rtc_device *rtc;
- rc = i2c_attach_client(new_client);
- if (rc < 0)
- goto failout;
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
- dev_info(&new_client->dev,
+ if (isl1208_i2c_validate_client(client) < 0)
+ return -ENODEV;
+
+ dev_info(&client->dev,
"chip found, driver version " DRV_VERSION "\n");
rtc = rtc_device_register(isl1208_driver.driver.name,
- &new_client->dev,
- &isl1208_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rtc)) {
- rc = PTR_ERR(rtc);
- goto failout_detach;
- }
+ &client->dev, &isl1208_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- i2c_set_clientdata(new_client, rtc);
+ i2c_set_clientdata(client, rtc);
- rc = isl1208_i2c_get_sr(new_client);
+ rc = isl1208_i2c_get_sr(client);
if (rc < 0) {
- dev_err(&new_client->dev, "reading status failed\n");
- goto failout_unregister;
+ dev_err(&client->dev, "reading status failed\n");
+ goto exit_unregister;
}
if (rc & ISL1208_REG_SR_RTCF)
- dev_warn(&new_client->dev, "rtc power failure detected, "
+ dev_warn(&client->dev, "rtc power failure detected, "
"please set clock.\n");
- rc = device_create_file(&new_client->dev, &dev_attr_atrim);
- if (rc < 0)
- goto failout_unregister;
- rc = device_create_file(&new_client->dev, &dev_attr_dtrim);
- if (rc < 0)
- goto failout_atrim;
- rc = device_create_file(&new_client->dev, &dev_attr_usr);
- if (rc < 0)
- goto failout_dtrim;
+ rc = isl1208_sysfs_register(&client->dev);
+ if (rc)
+ goto exit_unregister;
return 0;
- failout_dtrim:
- device_remove_file(&new_client->dev, &dev_attr_dtrim);
- failout_atrim:
- device_remove_file(&new_client->dev, &dev_attr_atrim);
- failout_unregister:
+exit_unregister:
rtc_device_unregister(rtc);
- failout_detach:
- i2c_detach_client(new_client);
- failout:
- kfree(new_client);
- return rc;
-}
-static int
-isl1208_attach_adapter (struct i2c_adapter *adapter)
-{
- return i2c_probe(adapter, &addr_data, isl1208_probe);
+ return rc;
}
static int
-isl1208_detach_client(struct i2c_client *client)
+isl1208_remove(struct i2c_client *client)
{
- int rc;
- struct rtc_device *const rtc = i2c_get_clientdata(client);
-
- if (rtc)
- rtc_device_unregister(rtc); /* do we need to kfree? */
-
- rc = i2c_detach_client(client);
- if (rc)
- return rc;
+ struct rtc_device *rtc = i2c_get_clientdata(client);
- kfree(client);
+ isl1208_sysfs_unregister(&client->dev);
+ rtc_device_unregister(rtc);
return 0;
}
-/* module management */
+static struct i2c_driver isl1208_driver = {
+ .driver = {
+ .name = "rtc-isl1208",
+ },
+ .probe = isl1208_probe,
+ .remove = isl1208_remove,
+};
-static int __init isl1208_init(void)
+static int __init
+isl1208_init(void)
{
return i2c_add_driver(&isl1208_driver);
}
-static void __exit isl1208_exit(void)
+static void __exit
+isl1208_exit(void)
{
i2c_del_driver(&isl1208_driver);
}
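The isl1208 changes above are the template for the other conversions in
this series: the legacy attach_adapter/detach_client pair, the address
scanning tables (normal_i2c, I2C_CLIENT_INSMOD) and the hand-allocated
i2c_client all disappear in favour of probe/remove callbacks that receive
an already-instantiated client from the i2c core. A hedged skeleton of the
new-style shape (names illustrative, probe signature matching this tree):

    static int demo_probe(struct i2c_client *client)
    {
            /* client->adapter and client->addr are already set up by
             * the i2c core; no kzalloc or i2c_attach_client needed.
             */
            if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
                    return -ENODEV;
            return 0;
    }

    static int demo_remove(struct i2c_client *client)
    {
            return 0;       /* undo whatever demo_probe set up */
    }

    static struct i2c_driver demo_driver = {
            .driver = {
                    .name = "demo",
            },
            .probe  = demo_probe,
            .remove = demo_remove,
    };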
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 7683412970c4..ded3c0abad83 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -98,7 +98,7 @@ static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (rc != ARRAY_SIZE(msgs)) {
dev_err(&client->dev, "%s: register read failed\n",
- __FUNCTION__);
+ __func__);
return -EIO;
}
return 0;
@@ -150,7 +150,7 @@ static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
write_failed:
dev_err(&client->dev, "%s: register write failed\n",
- __FUNCTION__);
+ __func__);
return -EIO;
}
@@ -214,7 +214,7 @@ static int max6900_i2c_clear_write_protect(struct i2c_client *client)
rc = i2c_smbus_write_byte_data (client, MAX6900_REG_CONTROL_WRITE, 0);
if (rc < 0) {
dev_err(&client->dev, "%s: control register write failed\n",
- __FUNCTION__);
+ __func__);
return -EIO;
}
return 0;
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 1f956dc5d56e..12f0310ae89c 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -140,7 +140,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
dt->tm_year -= 1900;
#ifdef MAX6902_DEBUG
- printk("\n%s : Read RTC values\n",__FUNCTION__);
+ printk("\n%s : Read RTC values\n",__func__);
printk("tm_hour: %i\n",dt->tm_hour);
printk("tm_min : %i\n",dt->tm_min);
printk("tm_sec : %i\n",dt->tm_sec);
@@ -158,7 +158,7 @@ static int max6902_set_datetime(struct device *dev, struct rtc_time *dt)
dt->tm_year = dt->tm_year+1900;
#ifdef MAX6902_DEBUG
- printk("\n%s : Setting RTC values\n",__FUNCTION__);
+ printk("\n%s : Setting RTC values\n",__func__);
printk("tm_sec : %i\n",dt->tm_sec);
printk("tm_min : %i\n",dt->tm_min);
printk("tm_hour: %i\n",dt->tm_hour);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b3317fcc16c3..a41681d26eba 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -18,17 +18,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
-#define DRV_VERSION "0.4.2"
-
-/* Addresses to scan: none
- * This chip cannot be reliably autodetected. An empty eeprom
- * located at 0x51 will pass the validation routine due to
- * the way the registers are implemented.
- */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Module parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.4.3"
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01
@@ -53,8 +43,10 @@ I2C_CLIENT_INSMOD;
#define PCF8563_SC_LV 0x80 /* low voltage */
#define PCF8563_MO_C 0x80 /* century */
+static struct i2c_driver pcf8563_driver;
+
struct pcf8563 {
- struct i2c_client client;
+ struct rtc_device *rtc;
/*
* The meaning of MO_C bit varies by the chip type.
* From PCF8563 datasheet: this bit is toggled when the years
@@ -72,16 +64,13 @@ struct pcf8563 {
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
};
-static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind);
-static int pcf8563_detach(struct i2c_client *client);
-
/*
* In the routines that deal directly with the pcf8563 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
- struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
unsigned char buf[13] = { PCF8563_REG_ST1 };
struct i2c_msg msgs[] = {
@@ -91,7 +80,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
/* read registers */
if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
@@ -102,7 +91,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
- __FUNCTION__,
+ __func__,
buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6], buf[7],
buf[8]);
@@ -123,7 +112,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -138,13 +127,13 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
- struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
int i, err;
unsigned char buf[9];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -174,7 +163,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
if (err != sizeof(data)) {
dev_err(&client->dev,
"%s: err=%d addr=%02x, data=%02x\n",
- __FUNCTION__, err, data[0], data[1]);
+ __func__, err, data[0], data[1]);
return -EIO;
}
};
@@ -219,7 +208,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
if (xfer != ARRAY_SIZE(msgs)) {
dev_err(&client->dev,
"%s: could not read register 0x%02X\n",
- __FUNCTION__, pattern[i].reg);
+ __func__, pattern[i].reg);
return -EIO;
}
@@ -231,7 +220,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
dev_dbg(&client->dev,
"%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, "
"max=%d, value=%d, raw=0x%02X\n",
- __FUNCTION__, i, pattern[i].reg, pattern[i].mask,
+ __func__, i, pattern[i].reg, pattern[i].mask,
pattern[i].min, pattern[i].max,
value, buf);
@@ -257,100 +246,67 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
.set_time = pcf8563_rtc_set_time,
};
-static int pcf8563_attach(struct i2c_adapter *adapter)
-{
- return i2c_probe(adapter, &addr_data, pcf8563_probe);
-}
-
-static struct i2c_driver pcf8563_driver = {
- .driver = {
- .name = "pcf8563",
- },
- .id = I2C_DRIVERID_PCF8563,
- .attach_adapter = &pcf8563_attach,
- .detach_client = &pcf8563_detach,
-};
-
-static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
+static int pcf8563_probe(struct i2c_client *client)
{
struct pcf8563 *pcf8563;
- struct i2c_client *client;
- struct rtc_device *rtc;
int err = 0;
- dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+ dev_dbg(&client->dev, "%s\n", __func__);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
- err = -ENODEV;
- goto exit;
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
- if (!(pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL))) {
- err = -ENOMEM;
- goto exit;
- }
-
- client = &pcf8563->client;
- client->addr = address;
- client->driver = &pcf8563_driver;
- client->adapter = adapter;
-
- strlcpy(client->name, pcf8563_driver.driver.name, I2C_NAME_SIZE);
+ pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL);
+ if (!pcf8563)
+ return -ENOMEM;
/* Verify the chip is really an PCF8563 */
- if (kind < 0) {
- if (pcf8563_validate_client(client) < 0) {
- err = -ENODEV;
- goto exit_kfree;
- }
- }
-
- /* Inform the i2c layer */
- if ((err = i2c_attach_client(client)))
+ if (pcf8563_validate_client(client) < 0) {
+ err = -ENODEV;
goto exit_kfree;
+ }
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
- rtc = rtc_device_register(pcf8563_driver.driver.name, &client->dev,
- &pcf8563_rtc_ops, THIS_MODULE);
+ pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
+ &client->dev, &pcf8563_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
- goto exit_detach;
+ if (IS_ERR(pcf8563->rtc)) {
+ err = PTR_ERR(pcf8563->rtc);
+ goto exit_kfree;
}
- i2c_set_clientdata(client, rtc);
+ i2c_set_clientdata(client, pcf8563);
return 0;
-exit_detach:
- i2c_detach_client(client);
-
exit_kfree:
kfree(pcf8563);
-exit:
return err;
}
-static int pcf8563_detach(struct i2c_client *client)
+static int pcf8563_remove(struct i2c_client *client)
{
- struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
- int err;
- struct rtc_device *rtc = i2c_get_clientdata(client);
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
- if (rtc)
- rtc_device_unregister(rtc);
-
- if ((err = i2c_detach_client(client)))
- return err;
+ if (pcf8563->rtc)
+ rtc_device_unregister(pcf8563->rtc);
kfree(pcf8563);
return 0;
}
+static struct i2c_driver pcf8563_driver = {
+ .driver = {
+ .name = "rtc-pcf8563",
+ },
+ .probe = pcf8563_probe,
+ .remove = pcf8563_remove,
+};
+
static int __init pcf8563_init(void)
{
return i2c_add_driver(&pcf8563_driver);
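The pcf8563 conversion also changes how per-client state travels: instead
of embedding struct i2c_client inside struct pcf8563 and recovering the
container with container_of(), the private struct is allocated in probe
and attached with i2c_set_clientdata(). In sketch form (names
illustrative):

    struct demo_state {
            struct rtc_device *rtc;
    };

    static int state_probe(struct i2c_client *client)
    {
            struct demo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

            if (!st)
                    return -ENOMEM;
            i2c_set_clientdata(client, st);
            return 0;
    }

    static int state_remove(struct i2c_client *client)
    {
            kfree(i2c_get_clientdata(client));      /* mirror of probe */
            return 0;
    }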
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 8b3997007506..3d09d8f0b1f0 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/bcd.h>
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index 664e89a817ed..1c14d4497c4d 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -228,7 +228,7 @@ static int rs5c313_rtc_read_time(struct device *dev, struct rtc_time *tm)
ndelay(700); /* CE:L */
if (cnt++ > 100) {
- dev_err(dev, "%s: timeout error\n", __FUNCTION__);
+ dev_err(dev, "%s: timeout error\n", __func__);
return -EIO;
}
}
@@ -289,7 +289,7 @@ static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm)
ndelay(700); /* CE:L */
if (cnt++ > 100) {
- dev_err(dev, "%s: timeout error\n", __FUNCTION__);
+ dev_err(dev, "%s: timeout error\n", __func__);
return -EIO;
}
}
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 6b67b5097927..7e63074708eb 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -99,7 +99,7 @@ static int rs5c_get_regs(struct rs5c372 *rs5c)
* least 80219 chips; this works around that bug.
*/
if ((i2c_transfer(client->adapter, msgs, 1)) != 1) {
- pr_debug("%s: can't read registers\n", rs5c->rtc->name);
+ dev_warn(&client->dev, "can't read registers\n");
return -EIO;
}
@@ -166,7 +166,7 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -181,7 +181,7 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -195,7 +195,7 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[7] = BIN2BCD(tm->tm_year - 100);
if ((i2c_master_send(client, buf, 8)) != 8) {
- dev_err(&client->dev, "%s: write error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: write error\n", __func__);
return -EIO;
}
@@ -220,7 +220,7 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
*osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
if (trim) {
- dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, tmp);
+ dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp);
tmp &= RS5C372_TRIM_MASK;
if (tmp & 0x3e) {
int t = tmp & 0x3f;
@@ -500,7 +500,7 @@ static int rs5c372_probe(struct i2c_client *client)
struct rs5c372 *rs5c372;
struct rtc_time tm;
- dev_dbg(&client->dev, "%s\n", __FUNCTION__);
+ dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -512,12 +512,12 @@ static int rs5c372_probe(struct i2c_client *client)
goto exit;
}
- /* we read registers 0x0f then 0x00-0x0f; skip the first one */
- rs5c372->regs=&rs5c372->buf[1];
-
rs5c372->client = client;
i2c_set_clientdata(client, rs5c372);
+ /* we read registers 0x0f then 0x00-0x0f; skip the first one */
+ rs5c372->regs = &rs5c372->buf[1];
+
err = rs5c_get_regs(rs5c372);
if (err < 0)
goto exit_kfree;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9f4d5129a496..f26e0cad8f16 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -68,7 +68,7 @@ static void s3c_rtc_setaie(int to)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __FUNCTION__, to);
+ pr_debug("%s: aie=%d\n", __func__, to);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
@@ -82,7 +82,7 @@ static void s3c_rtc_setpie(int to)
{
unsigned int tmp;
- pr_debug("%s: pie=%d\n", __FUNCTION__, to);
+ pr_debug("%s: pie=%d\n", __func__, to);
spin_lock_irq(&s3c_rtc_pie_lock);
tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
@@ -457,7 +457,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pr_debug("%s: probe=%p\n", __FUNCTION__, pdev);
+ pr_debug("%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index c594b34c6767..110699bb4787 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -361,7 +361,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 4d27ccc4fc06..2531ce4c9db0 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -145,6 +145,8 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
unsigned long now, alarm;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
+ char *buf_ptr;
+ int adjust = 0;
/* Only request alarms that trigger in the future. Disable them
* by writing another time, e.g. 0 meaning Jan 1 1970 UTC.
@@ -154,7 +156,15 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
return retval;
rtc_tm_to_time(&alm.time, &now);
- alarm = simple_strtoul(buf, NULL, 0);
+ buf_ptr = (char *)buf;
+ if (*buf_ptr == '+') {
+ buf_ptr++;
+ adjust = 1;
+ }
+ alarm = simple_strtoul(buf_ptr, NULL, 0);
+ if (adjust) {
+ alarm += now;
+ }
if (alarm > now) {
/* Avoid accidentally clobbering active alarms; we can't
* entirely prevent that here, without even the minimal
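The hunk above adds a relative form to the wakealarm attribute: a leading
'+' makes the value an offset in seconds from the current RTC time rather
than an absolute epoch timestamp. A minimal user-space sketch of the new
syntax (the rtc0 path is assumed):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/rtc/rtc0/wakealarm", "w");

            if (!f) {
                    perror("wakealarm");
                    return 1;
            }
            fprintf(f, "+300\n");   /* wake 300 seconds from now */
            return fclose(f) ? 1 : 0;
    }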
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 254c9fce27da..bc930022004a 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -147,7 +147,7 @@ static int __devexit test_remove(struct platform_device *plat_dev)
return 0;
}
-static struct platform_driver test_drv = {
+static struct platform_driver test_driver = {
.probe = test_probe,
.remove = __devexit_p(test_remove),
.driver = {
@@ -160,7 +160,7 @@ static int __init test_init(void)
{
int err;
- if ((err = platform_driver_register(&test_drv)))
+ if ((err = platform_driver_register(&test_driver)))
return err;
if ((test0 = platform_device_alloc("rtc-test", 0)) == NULL) {
@@ -191,7 +191,7 @@ exit_free_test0:
platform_device_put(test0);
exit_driver_unregister:
- platform_driver_unregister(&test_drv);
+ platform_driver_unregister(&test_driver);
return err;
}
@@ -199,7 +199,7 @@ static void __exit test_exit(void)
{
platform_device_unregister(test0);
platform_device_unregister(test1);
- platform_driver_unregister(&test_drv);
+ platform_driver_unregister(&test_driver);
}
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 24203a06051a..10025d840268 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -107,7 +107,7 @@ static int v3020_read_time(struct device *dev, struct rtc_time *dt)
dt->tm_year = BCD2BIN(tmp)+100;
#ifdef DEBUG
- printk("\n%s : Read RTC values\n",__FUNCTION__);
+ printk("\n%s : Read RTC values\n",__func__);
printk("tm_hour: %i\n",dt->tm_hour);
printk("tm_min : %i\n",dt->tm_min);
printk("tm_sec : %i\n",dt->tm_sec);
@@ -126,7 +126,7 @@ static int v3020_set_time(struct device *dev, struct rtc_time *dt)
struct v3020 *chip = dev_get_drvdata(dev);
#ifdef DEBUG
- printk("\n%s : Setting RTC values\n",__FUNCTION__);
+ printk("\n%s : Setting RTC values\n",__func__);
printk("tm_sec : %i\n",dt->tm_sec);
printk("tm_min : %i\n",dt->tm_min);
printk("tm_hour: %i\n",dt->tm_hour);
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index b90fb1866ce9..095282f63523 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -22,20 +22,7 @@
#include <linux/rtc.h>
#include <linux/delay.h>
-#define DRV_VERSION "1.0.7"
-
-/* Addresses to scan: none. This chip is located at
- * 0x6f and uses a two bytes register addressing.
- * Two bytes need to be written to read a single register,
- * while most other chips just require one and take the second
- * one as the data to be written. To prevent corrupting
- * unknown chips, the user must explicitly set the probe parameter.
- */
-
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "1.0.8"
/* offsets into CCR area */
@@ -91,19 +78,7 @@ I2C_CLIENT_INSMOD;
#define X1205_HR_MIL 0x80 /* Set in ccr.hour for 24 hr mode */
-/* Prototypes */
-static int x1205_attach(struct i2c_adapter *adapter);
-static int x1205_detach(struct i2c_client *client);
-static int x1205_probe(struct i2c_adapter *adapter, int address, int kind);
-
-static struct i2c_driver x1205_driver = {
- .driver = {
- .name = "x1205",
- },
- .id = I2C_DRIVERID_X1205,
- .attach_adapter = &x1205_attach,
- .detach_client = &x1205_detach,
-};
+static struct i2c_driver x1205_driver;
/*
* In the routines that deal directly with the x1205 hardware, we use
@@ -124,14 +99,14 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
/* read date registers */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
dev_dbg(&client->dev,
"%s: raw read data - sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, mon=%02x, year=%02x, wday=%02x, y2k=%02x\n",
- __FUNCTION__,
+ __func__,
buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6], buf[7]);
@@ -146,7 +121,7 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -164,7 +139,7 @@ static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
/* read status register */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
@@ -187,7 +162,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
dev_dbg(&client->dev,
"%s: secs=%d, mins=%d, hours=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_sec, tm->tm_min, tm->tm_hour);
buf[CCR_SEC] = BIN2BCD(tm->tm_sec);
@@ -200,7 +175,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
if (datetoo) {
dev_dbg(&client->dev,
"%s: mday=%d, mon=%d, year=%d, wday=%d\n",
- __FUNCTION__,
+ __func__,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[CCR_MDAY] = BIN2BCD(tm->tm_mday);
@@ -216,12 +191,12 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
/* this sequence is required to unlock the chip */
if ((xfer = i2c_master_send(client, wel, 3)) != 3) {
- dev_err(&client->dev, "%s: wel - %d\n", __FUNCTION__, xfer);
+ dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer);
return -EIO;
}
if ((xfer = i2c_master_send(client, rwel, 3)) != 3) {
- dev_err(&client->dev, "%s: rwel - %d\n", __FUNCTION__, xfer);
+ dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer);
return -EIO;
}
@@ -233,7 +208,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
if (xfer != 3) {
dev_err(&client->dev,
"%s: xfer=%d addr=%02x, data=%02x\n",
- __FUNCTION__,
+ __func__,
xfer, rdata[1], rdata[2]);
return -EIO;
}
@@ -241,7 +216,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
/* disable further writes */
if ((xfer = i2c_master_send(client, diswe, 3)) != 3) {
- dev_err(&client->dev, "%s: diswe - %d\n", __FUNCTION__, xfer);
+ dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer);
return -EIO;
}
@@ -274,11 +249,11 @@ static int x1205_get_dtrim(struct i2c_client *client, int *trim)
/* read dtr register */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
- dev_dbg(&client->dev, "%s: raw dtr=%x\n", __FUNCTION__, dtr);
+ dev_dbg(&client->dev, "%s: raw dtr=%x\n", __func__, dtr);
*trim = 0;
@@ -306,11 +281,11 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
/* read atr register */
if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+ dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
- dev_dbg(&client->dev, "%s: raw atr=%x\n", __FUNCTION__, atr);
+ dev_dbg(&client->dev, "%s: raw atr=%x\n", __func__, atr);
/* atr is a two's complement value on 6 bits,
* perform sign extension. The formula is
@@ -319,11 +294,11 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
if (atr & 0x20)
atr |= 0xC0;
- dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __FUNCTION__, atr, atr);
+ dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);
*trim = (atr * 250) + 11000;
- dev_dbg(&client->dev, "%s: real=%d\n", __FUNCTION__, *trim);
+ dev_dbg(&client->dev, "%s: real=%d\n", __func__, *trim);
return 0;
}
@@ -377,7 +352,7 @@ static int x1205_validate_client(struct i2c_client *client)
if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
dev_err(&client->dev,
"%s: could not read register %x\n",
- __FUNCTION__, probe_zero_pattern[i]);
+ __func__, probe_zero_pattern[i]);
return -EIO;
}
@@ -385,7 +360,7 @@ static int x1205_validate_client(struct i2c_client *client)
if ((buf & probe_zero_pattern[i+1]) != 0) {
dev_err(&client->dev,
"%s: register=%02x, zero pattern=%d, value=%x\n",
- __FUNCTION__, probe_zero_pattern[i], i, buf);
+ __func__, probe_zero_pattern[i], i, buf);
return -ENODEV;
}
@@ -405,7 +380,7 @@ static int x1205_validate_client(struct i2c_client *client)
if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
dev_err(&client->dev,
"%s: could not read register %x\n",
- __FUNCTION__, probe_limits_pattern[i].reg);
+ __func__, probe_limits_pattern[i].reg);
return -EIO;
}
@@ -416,7 +391,7 @@ static int x1205_validate_client(struct i2c_client *client)
value < probe_limits_pattern[i].min) {
dev_dbg(&client->dev,
"%s: register=%x, lim pattern=%d, value=%d\n",
- __FUNCTION__, probe_limits_pattern[i].reg,
+ __func__, probe_limits_pattern[i].reg,
i, value);
return -ENODEV;
@@ -497,58 +472,49 @@ static ssize_t x1205_sysfs_show_dtrim(struct device *dev,
}
static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL);
-static int x1205_attach(struct i2c_adapter *adapter)
+static int x1205_sysfs_register(struct device *dev)
+{
+ int err;
+
+ err = device_create_file(dev, &dev_attr_atrim);
+ if (err)
+ return err;
+
+ err = device_create_file(dev, &dev_attr_dtrim);
+ if (err)
+ device_remove_file(dev, &dev_attr_atrim);
+
+ return err;
+}
+
+static void x1205_sysfs_unregister(struct device *dev)
{
- return i2c_probe(adapter, &addr_data, x1205_probe);
+ device_remove_file(dev, &dev_attr_atrim);
+ device_remove_file(dev, &dev_attr_dtrim);
}
-static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
+
+static int x1205_probe(struct i2c_client *client)
{
int err = 0;
unsigned char sr;
- struct i2c_client *client;
struct rtc_device *rtc;
- dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
- err = -ENODEV;
- goto exit;
- }
-
- if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
- err = -ENOMEM;
- goto exit;
- }
-
- /* I2C client */
- client->addr = address;
- client->driver = &x1205_driver;
- client->adapter = adapter;
+ dev_dbg(&client->dev, "%s\n", __func__);
- strlcpy(client->name, x1205_driver.driver.name, I2C_NAME_SIZE);
-
- /* Verify the chip is really an X1205 */
- if (kind < 0) {
- if (x1205_validate_client(client) < 0) {
- err = -ENODEV;
- goto exit_kfree;
- }
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
- /* Inform the i2c layer */
- if ((err = i2c_attach_client(client)))
- goto exit_kfree;
+ if (x1205_validate_client(client) < 0)
+ return -ENODEV;
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
rtc = rtc_device_register(x1205_driver.driver.name, &client->dev,
&x1205_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
- goto exit_detach;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
i2c_set_clientdata(client, rtc);
@@ -565,45 +531,35 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
else
dev_err(&client->dev, "couldn't read status\n");
- err = device_create_file(&client->dev, &dev_attr_atrim);
- if (err) goto exit_devreg;
- err = device_create_file(&client->dev, &dev_attr_dtrim);
- if (err) goto exit_atrim;
+ err = x1205_sysfs_register(&client->dev);
+ if (err)
+ goto exit_devreg;
return 0;
-exit_atrim:
- device_remove_file(&client->dev, &dev_attr_atrim);
-
exit_devreg:
rtc_device_unregister(rtc);
-exit_detach:
- i2c_detach_client(client);
-
-exit_kfree:
- kfree(client);
-
-exit:
return err;
}
-static int x1205_detach(struct i2c_client *client)
+static int x1205_remove(struct i2c_client *client)
{
- int err;
struct rtc_device *rtc = i2c_get_clientdata(client);
- if (rtc)
- rtc_device_unregister(rtc);
-
- if ((err = i2c_detach_client(client)))
- return err;
-
- kfree(client);
-
+ rtc_device_unregister(rtc);
+ x1205_sysfs_unregister(&client->dev);
return 0;
}
+static struct i2c_driver x1205_driver = {
+ .driver = {
+ .name = "rtc-x1205",
+ },
+ .probe = x1205_probe,
+ .remove = x1205_remove,
+};
+
static int __init x1205_init(void)
{
return i2c_add_driver(&x1205_driver);
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 5a888704a8d0..4f4e7cf105d4 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -5,7 +5,7 @@
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y += s390mach.o sysinfo.o s390_rdev.o
-obj-y += cio/ block/ char/ crypto/ net/ scsi/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 04787eab1016..bb52d2fbac18 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -36,7 +36,7 @@ static int dcssblk_open(struct inode *inode, struct file *filp);
static int dcssblk_release(struct inode *inode, struct file *filp);
static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- unsigned long *data);
+ void **kaddr, unsigned long *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -636,7 +636,7 @@ fail:
static int
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- unsigned long *data)
+ void **kaddr, unsigned long *pfn)
{
struct dcssblk_dev_info *dev_info;
unsigned long pgoff;
@@ -649,7 +649,9 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
pgoff = secnum / (PAGE_SIZE / 512);
if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start)
return -ERANGE;
- *data = (unsigned long) (dev_info->start+pgoff*PAGE_SIZE);
+ *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE);
+ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+
return 0;
}
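The dcssblk change tracks a block-layer interface change: direct_access
now fills in both a kernel virtual address and a page frame number for
the caller instead of returning a single opaque address. A hedged
caller-side sketch (the usual gendisk fops indirection, error handling
trimmed):

    /* Illustrative: translate a sector of a DCSS-backed block device
     * into a mappable kaddr/pfn pair via the new signature.
     */
    void *kaddr;
    unsigned long pfn;
    int rc;

    rc = bdev->bd_disk->fops->direct_access(bdev, sector, &kaddr, &pfn);
    if (rc == 0)
            pr_debug("sector %llu -> kaddr %p, pfn %lu\n",
                     (unsigned long long)sector, kaddr, pfn);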
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
new file mode 100644
index 000000000000..4a5ec39f9ca6
--- /dev/null
+++ b/drivers/s390/kvm/Makefile
@@ -0,0 +1,9 @@
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+obj-$(CONFIG_VIRTIO) += kvm_virtio.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
new file mode 100644
index 000000000000..bbef3764fbf8
--- /dev/null
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -0,0 +1,338 @@
+/*
+ * kvm_virtio.c - virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <asm/io.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#include <asm/setup.h>
+#include <asm/s390_ext.h>
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+/*
+ * The pointer to our page of device descriptions.
+ */
+static void *kvm_devices;
+
+/*
+ * Unique numbering for kvm devices.
+ */
+static unsigned int dev_index;
+
+struct kvm_device {
+ struct virtio_device vdev;
+ struct kvm_device_desc *desc;
+};
+
+#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout:
+ * - kvm_device_descriptor
+ * struct kvm_device_desc
+ * - configuration
+ * struct kvm_vqconfig
+ * - feature bits
+ * - config space
+ */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+ return (struct kvm_vqconfig *)(desc + 1);
+}
+
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+ return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+ return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+ return sizeof(*desc)
+ + desc->num_vq * sizeof(struct kvm_vqconfig)
+ + desc->feature_len * 2
+ + desc->config_len;
+}
+
+/*
+ * This tests (and acknowledges) a feature bit.
+ */
+static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+ u8 *features;
+
+ if (fbit / 8 > desc->feature_len)
+ return false;
+
+ features = kvm_vq_features(desc);
+ if (!(features[fbit / 8] & (1 << (fbit % 8))))
+ return false;
+
+ /*
+ * We set the matching bit in the other half of the bitmap to tell the
+ * Host we want to use this feature.
+ */
+ features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
+ return true;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned len)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned len)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host, to tell about status changes
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+ return to_kvmdev(vdev)->desc->status;
+}
+
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+ BUG_ON(!status);
+ to_kvmdev(vdev)->desc->status = status;
+ kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
+ (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * To reset the device, we use the KVM_S390_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+ kvm_hypercall1(KVM_S390_VIRTIO_RESET,
+ (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall. We hand the address of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static void kvm_notify(struct virtqueue *vq)
+{
+ struct kvm_vqconfig *config = vq->priv;
+
+ kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+}
+
+/*
+ * This routine finds the first virtqueue described in the configuration of
+ * this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq))
+{
+ struct kvm_device *kdev = to_kvmdev(vdev);
+ struct kvm_vqconfig *config;
+ struct virtqueue *vq;
+ int err;
+
+ if (index >= kdev->desc->num_vq)
+ return ERR_PTR(-ENOENT);
+
+ config = kvm_vq_config(kdev->desc)+index;
+
+ if (add_shared_memory(config->address,
+ vring_size(config->num, PAGE_SIZE))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
+ kvm_notify, callback);
+ if (!vq) {
+ err = -ENOMEM;
+ goto unmap;
+ }
+
+ /*
+ * Register a callback token.
+ * The host will send it back via the external interrupt parameter.
+ */
+ config->token = (u64) vq;
+
+ vq->priv = config;
+ return vq;
+unmap:
+ remove_shared_memory(config->address, vring_size(config->num,
+ PAGE_SIZE));
+out:
+ return ERR_PTR(err);
+}
+
+static void kvm_del_vq(struct virtqueue *vq)
+{
+ struct kvm_vqconfig *config = vq->priv;
+
+ vring_del_virtqueue(vq);
+ remove_shared_memory(config->address,
+ vring_size(config->num, PAGE_SIZE));
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops kvm_vq_configspace_ops = {
+ .feature = kvm_feature,
+ .get = kvm_get,
+ .set = kvm_set,
+ .get_status = kvm_get_status,
+ .set_status = kvm_set_status,
+ .reset = kvm_reset,
+ .find_vq = kvm_find_vq,
+ .del_vq = kvm_del_vq,
+};
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device kvm_root = {
+ .parent = NULL,
+ .bus_id = "kvm_s390",
+};
+
+/*
+ * Adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model.
+ */
+static void add_kvm_device(struct kvm_device_desc *d)
+{
+ struct kvm_device *kdev;
+
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev) {
+ printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
+ dev_index++);
+ return;
+ }
+
+ kdev->vdev.dev.parent = &kvm_root;
+ kdev->vdev.index = dev_index++;
+ kdev->vdev.id.device = d->type;
+ kdev->vdev.config = &kvm_vq_configspace_ops;
+ kdev->desc = d;
+
+ if (register_virtio_device(&kdev->vdev) != 0) {
+ printk(KERN_ERR "Failed to register kvm device %u\n",
+ kdev->vdev.index);
+ kfree(kdev);
+ }
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * Type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+ unsigned int i;
+ struct kvm_device_desc *d;
+
+ for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+ d = kvm_devices + i;
+
+ if (d->type == 0)
+ break;
+
+ add_kvm_device(d);
+ }
+}
+
+/*
+ * We emulate the request_irq behaviour on top of s390 external interrupts.
+ */
+static void kvm_extint_handler(u16 code)
+{
+ void *data = (void *) *(long *) __LC_PFAULT_INTPARM;
+ u16 subcode = S390_lowcore.cpu_addr;
+
+ if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
+ return;
+
+ vring_interrupt(0, data);
+}
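+
+/*
+ * Sketch of the round trip: kvm_find_vq() above stores the virtqueue
+ * pointer in config->token, the Host hands that token back in
+ * __LC_PFAULT_INTPARM when it injects external interrupt 0x2603, and
+ * we dispatch it straight to vring_interrupt() for the right queue.
+ */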
+
+/*
+ * Init function for virtio.
+ * The devices live in a single page above the top of "normal" memory.
+ */
+static int __init kvm_devices_init(void)
+{
+ int rc;
+
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+
+ rc = device_register(&kvm_root);
+ if (rc) {
+ printk(KERN_ERR "Could not register kvm_s390 root device");
+ return rc;
+ }
+
if (add_shared_memory(max_pfn << PAGE_SHIFT, PAGE_SIZE)) {
+ device_unregister(&kvm_root);
+ return -ENOMEM;
+ }
+
+ kvm_devices = (void *) (max_pfn << PAGE_SHIFT);
+
+ ctl_set_bit(0, 9);
+ register_external_interrupt(0x2603, kvm_extint_handler);
+
+ scan_devices();
+ return 0;
+}
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 7c3f02816e95..9af2330f07a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1927,7 +1927,8 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
/* setup new FSF request */
retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
- 0, NULL, &lock_flags, &fsf_req);
+ ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
+ &fsf_req);
if (retval) {
ZFCP_LOG_INFO("error: Could not create exchange configuration "
"data request for adapter %s.\n",
@@ -2035,21 +2036,21 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
min(FC_SERIAL_NUMBER_SIZE, 17));
}
- ZFCP_LOG_NORMAL("The adapter %s reported the following "
- "characteristics:\n"
- "WWNN 0x%016Lx, "
- "WWPN 0x%016Lx, "
- "S_ID 0x%06x,\n"
- "adapter version 0x%x, "
- "LIC version 0x%x, "
- "FC link speed %d Gb/s\n",
- zfcp_get_busid_by_adapter(adapter),
- (wwn_t) fc_host_node_name(shost),
- (wwn_t) fc_host_port_name(shost),
- fc_host_port_id(shost),
- adapter->hydra_version,
- adapter->fsf_lic_version,
- fc_host_speed(shost));
+ if (fsf_req->erp_action)
+ ZFCP_LOG_NORMAL("The adapter %s reported the following "
+ "characteristics:\n"
+ "WWNN 0x%016Lx, WWPN 0x%016Lx, "
+ "S_ID 0x%06x,\n"
+ "adapter version 0x%x, "
+ "LIC version 0x%x, "
+ "FC link speed %d Gb/s\n",
+ zfcp_get_busid_by_adapter(adapter),
+ (wwn_t) fc_host_node_name(shost),
+ (wwn_t) fc_host_port_name(shost),
+ fc_host_port_id(shost),
+ adapter->hydra_version,
+ adapter->fsf_lic_version,
+ fc_host_speed(shost));
if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
ZFCP_LOG_NORMAL("error: the adapter %s "
"only supports newer control block "
@@ -2114,8 +2115,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
return -EIO;
case FC_PORTTYPE_NPORT:
- ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
- "network detected at adapter %s.\n",
+ if (fsf_req->erp_action)
+ ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
+ "network detected at adapter "
+ "%s.\n",
zfcp_get_busid_by_adapter(adapter));
break;
default:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8cce5cc11d50..099970b27001 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -213,6 +213,7 @@
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
+#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -340,6 +341,15 @@ struct fsf_qtcb_prefix {
u8 res1[20];
} __attribute__ ((packed));
+struct fsf_statistics_info {
+ u64 input_req;
+ u64 output_req;
+ u64 control_req;
+ u64 input_mb;
+ u64 output_mb;
+ u64 seconds_act;
+} __attribute__ ((packed));
+
union fsf_status_qual {
u8 byte[FSF_STATUS_QUALIFIER_SIZE];
u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
@@ -436,7 +446,8 @@ struct fsf_qtcb_bottom_config {
u32 hardware_version;
u8 serial_number[32];
struct fsf_nport_serv_param plogi_payload;
- u8 res4[160];
+ struct fsf_statistics_info stat_info;
+ u8 res4[112];
} __attribute__ ((packed));
struct fsf_qtcb_bottom_port {
@@ -469,7 +480,10 @@ struct fsf_qtcb_bottom_port {
u64 control_requests;
u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */
u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */
- u8 res2[256];
+ u8 cp_util;
+ u8 cb_util;
+ u8 a_util;
+ u8 res2[253];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f81850624eed..01687559dc06 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -40,6 +40,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
unsigned int, unsigned int);
static struct device_attribute *zfcp_sysfs_sdev_attrs[];
+static struct device_attribute *zfcp_a_stats_attrs[];
struct zfcp_data zfcp_data = {
.scsi_host_template = {
@@ -61,6 +62,7 @@ struct zfcp_data zfcp_data = {
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.max_sectors = ZFCP_MAX_SECTORS,
+ .shost_attrs = zfcp_a_stats_attrs,
},
.driver_version = ZFCP_VERSION,
};
@@ -809,4 +811,116 @@ static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
NULL
};
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *scsi_host = dev_to_shost(dev);
+ struct fsf_qtcb_bottom_port *qtcb_port;
+ int retval;
+ struct zfcp_adapter *adapter;
+
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+ return -EOPNOTSUPP;
+
+ qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+ if (!qtcb_port)
+ return -ENOMEM;
+
+ retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+ if (!retval)
+ retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+ qtcb_port->cb_util, qtcb_port->a_util);
+ kfree(qtcb_port);
+ return retval;
+}
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+ struct fsf_statistics_info *stat_inf)
+{
+ int retval;
+ struct fsf_qtcb_bottom_config *qtcb_config;
+ struct Scsi_Host *scsi_host = dev_to_shost(dev);
+ struct zfcp_adapter *adapter;
+
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+ return -EOPNOTSUPP;
+
+ qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+ GFP_KERNEL);
+ if (!qtcb_config)
+ return -ENOMEM;
+
+ retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+ if (!retval)
+ *stat_inf = qtcb_config->stat_info;
+
+ kfree(qtcb_config);
+ return retval;
+}
+
+static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu %llu %llu\n",
+ (unsigned long long) stat_info.input_req,
+ (unsigned long long) stat_info.output_req,
+ (unsigned long long) stat_info.control_req);
+}
+
+static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu %llu\n",
+ (unsigned long long) stat_info.input_mb,
+ (unsigned long long) stat_info.output_mb);
+}
+
+static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu\n",
+ (unsigned long long) stat_info.seconds_act);
+}
+
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
+static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
+static DEVICE_ATTR(seconds_active, S_IRUGO,
+ zfcp_sysfs_adapter_sec_active_show, NULL);
+
+static struct device_attribute *zfcp_a_stats_attrs[] = {
+ &dev_attr_utilization,
+ &dev_attr_requests,
+ &dev_attr_megabytes,
+ &dev_attr_seconds_active,
+ NULL
+};
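+
+/*
+ * A minimal userspace sketch (not part of this driver) of how the
+ * statistics attributes could be read; "host0" and the exact sysfs
+ * path are assumptions that depend on the local configuration:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		FILE *f = fopen("/sys/class/scsi_host/host0/utilization", "r");
+ *		unsigned int cp, cb, a;
+ *
+ *		if (f && fscanf(f, "%u %u %u", &cp, &cb, &a) == 3)
+ *			printf("cp=%u cb=%u a=%u\n", cp, cb, a);
+ *		if (f)
+ *			fclose(f);
+ *		return 0;
+ *	}
+ */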
+
#undef ZFCP_LOG_AREA
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b374e457e5e2..b898d382b7b0 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1499,7 +1499,7 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
ioport = ((struct sccb_card *)pCurrCard)->ioPort;
- if ((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN)) {
+ if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
p_Sccb->HostStatus = SCCB_COMPLETE;
p_Sccb->SccbStatus = SCCB_ERROR;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7f78e3ea517d..99c57b0c1d54 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1677,6 +1677,16 @@ config MAC_SCSI
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config SCSI_MAC_ESP
+ tristate "Macintosh NCR53c9[46] SCSI"
+ depends on MAC && SCSI
+ help
This is the NCR 53c9x SCSI controller found on most of the 68040-based
Macintoshes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mac_esp.
+
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 23e6ecbd4778..6c775e350c98 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
+obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 6ccdc96cc480..a09b2d3fdf5a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1432,15 +1432,10 @@ static void run(struct work_struct *work)
*/
static irqreturn_t intr(int irqno, void *dev_id)
{
- struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
+ struct Scsi_Host *shpnt = dev_id;
unsigned long flags;
unsigned char rev, dmacntrl0;
- if (!shpnt) {
- printk(KERN_ERR "aha152x: catched interrupt %d for unknown controller.\n", irqno);
- return IRQ_NONE;
- }
-
/*
* Read a couple of registers that are known to not be all 1's. If
* we read all 1's (-1), that means that either:
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5a1471c370fa..80594947c6f6 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -153,8 +153,6 @@ struct aha1542_hostdata {
#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
-static struct Scsi_Host *aha_host[7]; /* One for each IRQ level (9-15) */
-
static DEFINE_SPINLOCK(aha1542_lock);
@@ -163,8 +161,7 @@ static DEFINE_SPINLOCK(aha1542_lock);
static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
static int aha1542_restart(struct Scsi_Host *shost);
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id);
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id);
+static void aha1542_intr_handle(struct Scsi_Host *shost);
#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
@@ -404,23 +401,19 @@ fail:
}
/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id)
+static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
{
unsigned long flags;
- struct Scsi_Host *shost;
-
- shost = aha_host[irq - 9];
- if (!shost)
- panic("Splunge!");
+ struct Scsi_Host *shost = dev_id;
spin_lock_irqsave(shost->host_lock, flags);
- aha1542_intr_handle(shost, dev_id);
+ aha1542_intr_handle(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
return IRQ_HANDLED;
}
/* A "high" level interrupt handler */
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
+static void aha1542_intr_handle(struct Scsi_Host *shost)
{
void (*my_done) (Scsi_Cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
@@ -1197,7 +1190,8 @@ fail:
DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
spin_lock_irqsave(&aha1542_lock, flags);
- if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", NULL)) {
+ if (request_irq(irq_level, do_aha1542_intr_handle, 0,
+ "aha1542", shpnt)) {
printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
spin_unlock_irqrestore(&aha1542_lock, flags);
goto unregister;
@@ -1205,7 +1199,7 @@ fail:
if (dma_chan != 0xFF) {
if (request_dma(dma_chan, "aha1542")) {
printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
- free_irq(irq_level, NULL);
+ free_irq(irq_level, shpnt);
spin_unlock_irqrestore(&aha1542_lock, flags);
goto unregister;
}
@@ -1214,7 +1208,7 @@ fail:
enable_dma(dma_chan);
}
}
- aha_host[irq_level - 9] = shpnt;
+
shpnt->this_id = scsi_id;
shpnt->unique_id = base_io;
shpnt->io_port = base_io;
@@ -1276,7 +1270,7 @@ unregister:
static int aha1542_release(struct Scsi_Host *shost)
{
if (shost->irq)
- free_irq(shost->irq, NULL);
+ free_irq(shost->irq, shost);
if (shost->dma_channel != 0xff)
free_dma(shost->dma_channel);
if (shost->io_port && shost->n_io_port)
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 2f00467b6b8c..be5558ab84ea 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -815,7 +815,7 @@ struct ahd_tmode_tstate {
struct ahd_phase_table_entry {
uint8_t phase;
uint8_t mesg_out; /* Message response to parity errors */
- char *phasemsg;
+ const char *phasemsg;
};
/************************** Serial EEPROM Format ******************************/
@@ -1314,7 +1314,7 @@ typedef int (ahd_device_setup_t)(struct ahd_softc *);
struct ahd_pci_identity {
uint64_t full_id;
uint64_t id_mask;
- char *name;
+ const char *name;
ahd_device_setup_t *setup;
};
@@ -1322,7 +1322,7 @@ struct ahd_pci_identity {
struct aic7770_identity {
uint32_t full_id;
uint32_t id_mask;
- char *name;
+ const char *name;
ahd_device_setup_t *setup;
};
extern struct aic7770_identity aic7770_ident_table [];
@@ -1333,12 +1333,11 @@ extern const int ahd_num_aic7770_devs;
/*************************** Function Declarations ****************************/
/******************************************************************************/
-void ahd_reset_cmds_pending(struct ahd_softc *ahd);
/***************************** PCI Front End *********************************/
-struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
int ahd_pci_config(struct ahd_softc *,
- struct ahd_pci_identity *);
+ const struct ahd_pci_identity *);
int ahd_pci_test_register_access(struct ahd_softc *);
#ifdef CONFIG_PM
void ahd_pci_suspend(struct ahd_softc *);
@@ -1376,16 +1375,6 @@ int ahd_write_flexport(struct ahd_softc *ahd,
int ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
uint8_t *value);
-/*************************** Interrupt Services *******************************/
-void ahd_run_qoutfifo(struct ahd_softc *ahd);
-#ifdef AHD_TARGET_MODE
-void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
-#endif
-void ahd_handle_hwerrint(struct ahd_softc *ahd);
-void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
-void ahd_handle_scsiint(struct ahd_softc *ahd,
- u_int intstat);
-
/***************************** Error Recovery *********************************/
typedef enum {
SEARCH_COMPLETE,
@@ -1479,7 +1468,7 @@ extern uint32_t ahd_debug;
void ahd_print_devinfo(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo);
void ahd_dump_card_state(struct ahd_softc *ahd);
-int ahd_print_register(ahd_reg_parse_entry_t *table,
+int ahd_print_register(const ahd_reg_parse_entry_t *table,
u_int num_entries,
const char *name,
u_int address,
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index be14e2ecb8f7..cca16fc5b4ad 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -198,6 +198,7 @@ register SEQINTCODE {
register CLRINT {
address 0x003
access_mode WO
+ count 19
field CLRHWERRINT 0x80 /* Rev B or greater */
field CLRBRKADRINT 0x40
field CLRSWTMINT 0x20
@@ -245,6 +246,7 @@ register CLRERR {
register HCNTRL {
address 0x005
access_mode RW
+ count 12
field SEQ_RESET 0x80 /* Rev B or greater */
field POWRDN 0x40
field SWINT 0x10
@@ -262,6 +264,7 @@ register HNSCB_QOFF {
address 0x006
access_mode RW
size 2
+ count 2
}
/*
@@ -270,6 +273,7 @@ register HNSCB_QOFF {
register HESCB_QOFF {
address 0x008
access_mode RW
+ count 2
}
/*
@@ -287,6 +291,7 @@ register HS_MAILBOX {
*/
register SEQINTSTAT {
address 0x00C
+ count 1
access_mode RO
field SEQ_SWTMRTO 0x10
field SEQ_SEQINT 0x08
@@ -332,6 +337,7 @@ register SNSCB_QOFF {
*/
register SESCB_QOFF {
address 0x012
+ count 2
access_mode RW
modes M_CCHAN
}
@@ -397,6 +403,7 @@ register DFCNTRL {
address 0x019
access_mode RW
modes M_DFF0, M_DFF1
+ count 11
field PRELOADEN 0x80
field SCSIENWRDIS 0x40 /* Rev B only. */
field SCSIEN 0x20
@@ -415,6 +422,7 @@ register DFCNTRL {
*/
register DSCOMMAND0 {
address 0x019
+ count 1
access_mode RW
modes M_CFG
field CACHETHEN 0x80 /* Cache Threshold enable */
@@ -580,6 +588,7 @@ register DFF_THRSH {
address 0x088
access_mode RW
modes M_CFG
+ count 1
field WR_DFTHRSH 0x70 {
WR_DFTHRSH_MIN,
WR_DFTHRSH_25,
@@ -800,6 +809,7 @@ register PCIXCTL {
address 0x093
access_mode RW
modes M_CFG
+ count 1
field SERRPULSE 0x80
field UNEXPSCIEN 0x20
field SPLTSMADIS 0x10
@@ -844,6 +854,7 @@ register DCHSPLTSTAT0 {
address 0x096
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field STAETERM 0x80
field SCBCERR 0x40
field SCADERR 0x20
@@ -895,6 +906,7 @@ register DCHSPLTSTAT1 {
address 0x097
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field RXDATABUCKET 0x01
}
@@ -1048,6 +1060,7 @@ register SGSPLTSTAT0 {
address 0x09E
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field STAETERM 0x80
field SCBCERR 0x40
field SCADERR 0x20
@@ -1065,6 +1078,7 @@ register SGSPLTSTAT1 {
address 0x09F
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field RXDATABUCKET 0x01
}
@@ -1086,6 +1100,7 @@ register DF0PCISTAT {
address 0x0A0
access_mode RW
modes M_CFG
+ count 1
field DPE 0x80
field SSE 0x40
field RMA 0x20
@@ -1184,6 +1199,7 @@ register TARGPCISTAT {
address 0x0A7
access_mode RW
modes M_CFG
+ count 5
field DPE 0x80
field SSE 0x40
field STA 0x08
@@ -1198,6 +1214,7 @@ register LQIN {
address 0x020
access_mode RW
size 20
+ count 2
modes M_DFF0, M_DFF1, M_SCSI
}
@@ -1229,6 +1246,7 @@ register LUNPTR {
address 0x022
access_mode RW
modes M_CFG
+ count 2
}
/*
@@ -1259,6 +1277,7 @@ register CMDLENPTR {
address 0x025
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1270,6 +1289,7 @@ register ATTRPTR {
address 0x026
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1281,6 +1301,7 @@ register FLAGPTR {
address 0x027
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1291,6 +1312,7 @@ register CMDPTR {
address 0x028
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1301,6 +1323,7 @@ register QNEXTPTR {
address 0x029
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1323,6 +1346,7 @@ register ABRTBYTEPTR {
address 0x02B
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1333,6 +1357,7 @@ register ABRTBITPTR {
address 0x02C
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1370,6 +1395,7 @@ register LUNLEN {
address 0x030
access_mode RW
modes M_CFG
+ count 2
mask ILUNLEN 0x0F
mask TLUNLEN 0xF0
}
@@ -1383,6 +1409,7 @@ register CDBLIMIT {
address 0x031
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1394,6 +1421,7 @@ register MAXCMD {
address 0x032
access_mode RW
modes M_CFG
+ count 9
}
/*
@@ -1458,6 +1486,7 @@ register LQCTL1 {
address 0x038
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field PCI2PCI 0x04
field SINGLECMD 0x02
field ABORTPENDING 0x01
@@ -1470,6 +1499,7 @@ register LQCTL2 {
address 0x039
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 5
field LQIRETRY 0x80
field LQICONTINUE 0x40
field LQITOIDLE 0x20
@@ -1528,6 +1558,7 @@ register SCSISEQ1 {
address 0x03B
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 8
field MANUALCTL 0x40
field ENSELI 0x20
field ENRSELI 0x10
@@ -1667,6 +1698,9 @@ register SCSISIGO {
}
}
+/*
+ * SCSI Control Signal In
+ */
register SCSISIGI {
address 0x041
access_mode RO
@@ -1703,6 +1737,7 @@ register MULTARGID {
access_mode RW
modes M_CFG
size 2
+ count 2
}
/*
@@ -1758,6 +1793,7 @@ register TARGIDIN {
address 0x048
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field CLKOUT 0x80
field TARGID 0x0F
}
@@ -1798,6 +1834,7 @@ register OPTIONMODE {
address 0x04A
access_mode RW
modes M_CFG
+ count 4
field BIOSCANCTL 0x80
field AUTOACKEN 0x40
field BIASCANCTL 0x20
@@ -1850,6 +1887,7 @@ register SIMODE0 {
address 0x04B
access_mode RW
modes M_CFG
+ count 8
field ENSELDO 0x40
field ENSELDI 0x20
field ENSELINGO 0x10
@@ -1945,6 +1983,7 @@ register PERRDIAG {
address 0x04E
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field HIZERO 0x80
field HIPERR 0x40
field PREVPHASE 0x20
@@ -1962,6 +2001,7 @@ register LQISTATE {
address 0x04E
access_mode RO
modes M_CFG
+ count 6
}
/*
@@ -1971,6 +2011,7 @@ register SOFFCNT {
address 0x04F
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 1
}
/*
@@ -1980,6 +2021,7 @@ register LQOSTATE {
address 0x04F
access_mode RO
modes M_CFG
+ count 2
}
/*
@@ -1989,6 +2031,7 @@ register LQISTAT0 {
address 0x050
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field LQIATNQAS 0x20
field LQICRCT1 0x10
field LQICRCT2 0x08
@@ -2004,6 +2047,7 @@ register CLRLQIINT0 {
address 0x050
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 1
field CLRLQIATNQAS 0x20
field CLRLQICRCT1 0x10
field CLRLQICRCT2 0x08
@@ -2019,6 +2063,7 @@ register LQIMODE0 {
address 0x050
access_mode RW
modes M_CFG
+ count 3
field ENLQIATNQASK 0x20
field ENLQICRCT1 0x10
field ENLQICRCT2 0x08
@@ -2034,6 +2079,7 @@ register LQISTAT1 {
address 0x051
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field LQIPHASE_LQ 0x80
field LQIPHASE_NLQ 0x40
field LQIABORT 0x20
@@ -2051,6 +2097,7 @@ register CLRLQIINT1 {
address 0x051
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 4
field CLRLQIPHASE_LQ 0x80
field CLRLQIPHASE_NLQ 0x40
field CLRLIQABORT 0x20
@@ -2068,6 +2115,7 @@ register LQIMODE1 {
address 0x051
access_mode RW
modes M_CFG
+ count 4
field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */
field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */
field ENLIQABORT 0x20
@@ -2102,6 +2150,7 @@ register SSTAT3 {
address 0x053
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field NTRAMPERR 0x02
field OSRAMPERR 0x01
}
@@ -2113,6 +2162,7 @@ register CLRSINT3 {
address 0x053
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field CLRNTRAMPERR 0x02
field CLROSRAMPERR 0x01
}
@@ -2124,6 +2174,7 @@ register SIMODE3 {
address 0x053
access_mode RW
modes M_CFG
+ count 4
field ENNTRAMPERR 0x02
field ENOSRAMPERR 0x01
}
@@ -2135,6 +2186,7 @@ register LQOSTAT0 {
address 0x054
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field LQOTARGSCBPERR 0x10
field LQOSTOPT2 0x08
field LQOATNLQ 0x04
@@ -2149,6 +2201,7 @@ register CLRLQOINT0 {
address 0x054
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field CLRLQOTARGSCBPERR 0x10
field CLRLQOSTOPT2 0x08
field CLRLQOATNLQ 0x04
@@ -2163,6 +2216,7 @@ register LQOMODE0 {
address 0x054
access_mode RW
modes M_CFG
+ count 4
field ENLQOTARGSCBPERR 0x10
field ENLQOSTOPT2 0x08
field ENLQOATNLQ 0x04
@@ -2191,6 +2245,7 @@ register CLRLQOINT1 {
address 0x055
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 7
field CLRLQOINITSCBPERR 0x10
field CLRLQOSTOPI2 0x08
field CLRLQOBADQAS 0x04
@@ -2205,6 +2260,7 @@ register LQOMODE1 {
address 0x055
access_mode RW
modes M_CFG
+ count 4
field ENLQOINITSCBPERR 0x10
field ENLQOSTOPI2 0x08
field ENLQOBADQAS 0x04
@@ -2232,6 +2288,7 @@ register OS_SPACE_CNT {
address 0x056
access_mode RO
modes M_CFG
+ count 2
}
/*
@@ -2286,13 +2343,19 @@ register NEXTSCB {
modes M_SCSI
}
-/* Rev B only. */
+/*
+ * LQO SCSI Control
+ * (Rev B only.)
+ */
register LQOSCSCTL {
address 0x05A
access_mode RW
size 1
modes M_CFG
+ count 1
field LQOH2A_VERSION 0x80
+ field LQOBUSETDLY 0x40
+ field LQONOHOLDLACK 0x02
field LQONOCHKOVER 0x01
}
@@ -2459,6 +2522,7 @@ register NEGPERIOD {
address 0x061
access_mode RW
modes M_SCSI
+ count 1
}
/*
@@ -2478,6 +2542,7 @@ register NEGOFFSET {
address 0x062
access_mode RW
modes M_SCSI
+ count 1
}
/*
@@ -2487,6 +2552,7 @@ register NEGPPROPTS {
address 0x063
access_mode RW
modes M_SCSI
+ count 1
field PPROPT_PACE 0x08
field PPROPT_QAS 0x04
field PPROPT_DT 0x02
@@ -2516,12 +2582,19 @@ register ANNEXCOL {
address 0x065
access_mode RW
modes M_SCSI
+ count 7
}
+/*
+ * SCSI Check
+ * (Rev. B only)
+ */
register SCSCHKN {
address 0x066
access_mode RW
modes M_CFG
+ count 1
+ field BIDICHKDIS 0x80
field STSELSKIDDIS 0x40
field CURRFIFODEF 0x20
field WIDERESEN 0x10
@@ -2561,6 +2634,7 @@ register ANNEXDAT {
address 0x066
access_mode RW
modes M_SCSI
+ count 3
}
/*
@@ -2596,6 +2670,7 @@ register TOWNID {
address 0x069
access_mode RW
modes M_SCSI
+ count 2
}
/*
@@ -2737,6 +2812,7 @@ register SCBAUTOPTR {
address 0x0AB
access_mode RW
modes M_CFG
+ count 1
field AUSCBPTR_EN 0x80
field SCBPTR_ADDR 0x38
field SCBPTR_OFF 0x07
@@ -2881,6 +2957,7 @@ register BRDDAT {
address 0x0B8
access_mode RW
modes M_SCSI
+ count 2
}
/*
@@ -2890,6 +2967,7 @@ register BRDCTL {
address 0x0B9
access_mode RW
modes M_SCSI
+ count 7
field FLXARBACK 0x80
field FLXARBREQ 0x40
field BRDADDR 0x38
@@ -2905,6 +2983,7 @@ register SEEADR {
address 0x0BA
access_mode RW
modes M_SCSI
+ count 4
}
/*
@@ -2915,6 +2994,7 @@ register SEEDAT {
access_mode RW
size 2
modes M_SCSI
+ count 4
}
/*
@@ -2924,6 +3004,7 @@ register SEESTAT {
address 0x0BE
access_mode RO
modes M_SCSI
+ count 1
field INIT_DONE 0x80
field SEEOPCODE 0x70
field LDALTID_L 0x08
@@ -2939,6 +3020,7 @@ register SEECTL {
address 0x0BE
access_mode RW
modes M_SCSI
+ count 4
field SEEOPCODE 0x70 {
SEEOP_ERASE 0x70,
SEEOP_READ 0x60,
@@ -3000,6 +3082,7 @@ register DSPDATACTL {
address 0x0C1
access_mode RW
modes M_CFG
+ count 3
field BYPASSENAB 0x80
field DESQDIS 0x10
field RCVROFFSTDIS 0x04
@@ -3058,6 +3141,7 @@ register DSPSELECT {
address 0x0C4
access_mode RW
modes M_CFG
+ count 1
field AUTOINCEN 0x80
field DSPSEL 0x1F
}
@@ -3071,6 +3155,7 @@ register WRTBIASCTL {
address 0x0C5
access_mode WO
modes M_CFG
+ count 3
field AUTOXBCDIS 0x80
field XMITMANVAL 0x3F
}
@@ -3196,7 +3281,8 @@ register OVLYADDR {
*/
register SEQCTL0 {
address 0x0D6
- access_mode RW
+ access_mode RW
+ count 11
field PERRORDIS 0x80
field PAUSEDIS 0x40
field FAILDIS 0x20
@@ -3226,7 +3312,8 @@ register SEQCTL1 {
*/
register FLAGS {
address 0x0D8
- access_mode RO
+ access_mode RO
+ count 23
field ZERO 0x02
field CARRY 0x01
}
@@ -3255,7 +3342,8 @@ register SEQINTCTL {
*/
register SEQRAM {
address 0x0DA
- access_mode RW
+ access_mode RW
+ count 2
}
/*
@@ -3266,6 +3354,7 @@ register PRGMCNT {
address 0x0DE
access_mode RW
size 2
+ count 5
}
/*
@@ -3273,7 +3362,7 @@ register PRGMCNT {
*/
register ACCUM {
address 0x0E0
- access_mode RW
+ access_mode RW
accumulator
}
@@ -3401,6 +3490,7 @@ register INTVEC1_ADDR {
access_mode RW
size 2
modes M_CFG
+ count 1
}
/*
@@ -3412,6 +3502,7 @@ register CURADDR {
access_mode RW
size 2
modes M_SCSI
+ count 2
}
/*
@@ -3423,6 +3514,7 @@ register INTVEC2_ADDR {
access_mode RW
size 2
modes M_CFG
+ count 1
}
/*
@@ -3579,6 +3671,7 @@ scratch_ram {
/* Parameters for DMA Logic */
DMAPARAMS {
size 1
+ count 8
field PRELOADEN 0x80
field WIDEODD 0x40
field SCSIEN 0x20
@@ -3648,9 +3741,11 @@ scratch_ram {
*/
KERNEL_TQINPOS {
size 1
+ count 1
}
- TQINPOS {
+ TQINPOS {
size 1
+ count 8
}
/*
* Base address of our shared data with the kernel driver in host
@@ -3681,6 +3776,7 @@ scratch_ram {
}
ARG_2 {
size 1
+ count 1
alias RETURN_2
}
@@ -3698,6 +3794,7 @@ scratch_ram {
*/
SCSISEQ_TEMPLATE {
size 1
+ count 7
field MANUALCTL 0x40
field ENSELI 0x20
field ENRSELI 0x10
@@ -3711,6 +3808,7 @@ scratch_ram {
*/
INITIATOR_TAG {
size 1
+ count 1
}
SEQ_FLAGS2 {
@@ -3777,6 +3875,7 @@ scratch_ram {
*/
CMDSIZE_TABLE {
size 8
+ count 8
}
/*
* When an SCB with the MK_MESSAGE flag is
@@ -3803,8 +3902,8 @@ scratch_ram {
/************************* Hardware SCB Definition ****************************/
scb {
address 0x180
- size 64
- modes 0, 1, 2, 3
+ size 64
+ modes 0, 1, 2, 3
SCB_RESIDUAL_DATACNT {
size 4
alias SCB_CDB_STORE
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index ade0fb8fbdb2..55508b0fcec4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,7 +52,7 @@
/***************************** Lookup Tables **********************************/
-static char *ahd_chip_names[] =
+static const char *const ahd_chip_names[] =
{
"NONE",
"aic7901",
@@ -66,10 +66,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
*/
struct ahd_hard_error_entry {
uint8_t errno;
- char *errmesg;
+ const char *errmesg;
};
-static struct ahd_hard_error_entry ahd_hard_errors[] = {
+static const struct ahd_hard_error_entry ahd_hard_errors[] = {
{ DSCTMOUT, "Discard Timer has timed out" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
{ SQPARERR, "Sequencer Parity Error" },
@@ -79,7 +79,7 @@ static struct ahd_hard_error_entry ahd_hard_errors[] = {
};
static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
-static struct ahd_phase_table_entry ahd_phase_table[] =
+static const struct ahd_phase_table_entry ahd_phase_table[] =
{
{ P_DATAOUT, MSG_NOOP, "in Data-out phase" },
{ P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -213,7 +213,7 @@ static void ahd_dumpseq(struct ahd_softc *ahd);
#endif
static void ahd_loadseq(struct ahd_softc *ahd);
static int ahd_check_patch(struct ahd_softc *ahd,
- struct patch **start_patch,
+ const struct patch **start_patch,
u_int start_instr, u_int *skip_addr);
static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
u_int address);
@@ -254,7 +254,7 @@ static void ahd_freeze_devq(struct ahd_softc *ahd,
struct scb *scb);
static void ahd_handle_scb_status(struct ahd_softc *ahd,
struct scb *scb);
-static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
+static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
static void ahd_shutdown(void *arg);
static void ahd_update_coalescing_values(struct ahd_softc *ahd,
u_int timer,
@@ -266,8 +266,774 @@ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
int target, char channel, int lun,
u_int tag, role_t role);
-/******************************** Private Inlines *****************************/
+static void ahd_reset_cmds_pending(struct ahd_softc *ahd);
+
+/*************************** Interrupt Services *******************************/
+static void ahd_run_qoutfifo(struct ahd_softc *ahd);
+#ifdef AHD_TARGET_MODE
+static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
+#endif
+static void ahd_handle_hwerrint(struct ahd_softc *ahd);
+static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
+static void ahd_handle_scsiint(struct ahd_softc *ahd,
+ u_int intstat);
+
+/************************ Sequencer Execution Control *************************/
+void
+ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+ if (ahd->src_mode == src && ahd->dst_mode == dst)
+ return;
+#ifdef AHD_DEBUG
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ panic("Setting mode prior to saving it.\n");
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
+ ahd_build_mode_state(ahd, src, dst));
+#endif
+ ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
+ ahd->src_mode = src;
+ ahd->dst_mode = dst;
+}
+
+static void
+ahd_update_modes(struct ahd_softc *ahd)
+{
+ ahd_mode_state mode_ptr;
+ ahd_mode src;
+ ahd_mode dst;
+
+ mode_ptr = ahd_inb(ahd, MODE_PTR);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printf("Reading mode 0x%x\n", mode_ptr);
+#endif
+ ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
+ ahd_known_modes(ahd, src, dst);
+}
+
+static void
+ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+ ahd_mode dstmode, const char *file, int line)
+{
+#ifdef AHD_DEBUG
+ if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
+ || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
+ panic("%s:%s:%d: Mode assertion failed.\n",
+ ahd_name(ahd), file, line);
+ }
+#endif
+}
+
+#define AHD_ASSERT_MODES(ahd, source, dest) \
+ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
+
+ahd_mode_state
+ahd_save_modes(struct ahd_softc *ahd)
+{
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ ahd_update_modes(ahd);
+
+ return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
+}
+
+void
+ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
+{
+ ahd_mode src;
+ ahd_mode dst;
+
+ ahd_extract_mode_state(ahd, state, &src, &dst);
+ ahd_set_modes(ahd, src, dst);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahd_is_paused(struct ahd_softc *ahd)
+{
+ return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahd_pause(struct ahd_softc *ahd)
+{
+ ahd_outb(ahd, HCNTRL, ahd->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahd_is_paused(ahd) == 0)
+ ;
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahd_unpause(struct ahd_softc *ahd)
+{
+ /*
+ * Automatically restore our modes to those saved
+ * prior to the first change of the mode.
+ */
+ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
+ && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
+ if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
+ ahd_reset_cmds_pending(ahd);
+ ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+ }
+
+ if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
+ ahd_outb(ahd, HCNTRL, ahd->unpause);
+
+ ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
+}
+
+/*********************** Scatter Gather List Handling *************************/
+void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr, bus_size_t len, int last)
+{
+ scb->sg_count++;
+ if (sizeof(dma_addr_t) > 4
+ && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)sgptr;
+ sg->addr = ahd_htole64(addr);
+ sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ } else {
+ struct ahd_dma_seg *sg;
+ sg = (struct ahd_dma_seg *)sgptr;
+ sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
+ sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+ | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ }
+}
+
+static void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+ /* XXX Handle target mode SCBs. */
+ scb->crc_retry_count = 0;
+ if ((scb->flags & SCB_PACKETIZED) != 0) {
+ /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
+ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
+ } else {
+ if (ahd_get_transfer_length(scb) & 0x01)
+ scb->hscb->task_attribute = SCB_XFERLEN_ODD;
+ else
+ scb->hscb->task_attribute = 0;
+ }
+
+ if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+ || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+ scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+ ahd_htole32(scb->sense_busaddr);
+}
+
+static void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ /*
+ * Copy the first SG into the "current" data pointer area.
+ */
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)scb->sg_list;
+ scb->hscb->dataptr = sg->addr;
+ scb->hscb->datacnt = sg->len;
+ } else {
+ struct ahd_dma_seg *sg;
+ uint32_t *dataptr_words;
+
+ sg = (struct ahd_dma_seg *)scb->sg_list;
+ dataptr_words = (uint32_t*)&scb->hscb->dataptr;
+ dataptr_words[0] = sg->addr;
+ dataptr_words[1] = 0;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ uint64_t high_addr;
+
+ high_addr = ahd_le32toh(sg->len) & 0x7F000000;
+ scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
+ }
+ scb->hscb->datacnt = sg->len;
+ }
+ /*
+ * Note where to find the SG entries in bus space.
+ * We also set the full residual flag which the
+ * sequencer will clear as soon as a data transfer
+ * occurs.
+ */
+ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+ scb->hscb->dataptr = 0;
+ scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+ return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+ - ahd_sg_size(ahd);
+
+ return (scb->sg_list_busaddr + sg_offset);
+}
+
+static void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+ scb->hscb_map->dmamap,
+ /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+ scb->sg_map->dmamap,
+ /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+ /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+ scb->sense_map->dmamap,
+ /*offset*/scb->sense_busaddr,
+ /*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+#ifdef AHD_TARGET_MODE
+static uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+ return (((uint8_t *)&ahd->targetcmds[index])
+ - (uint8_t *)ahd->qoutfifo);
+}
+#endif
+
+/********************** Miscellaneous Support Functions ***********************/
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahd_initiator_tinfo *
+ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
+ u_int remote_id, struct ahd_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahd->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
+
+uint16_t
+ahd_inw(struct ahd_softc *ahd, u_int port)
+{
+ /*
+ * Read high byte first as some registers increment
+ * or have other side effects when the low byte is
+ * read.
+ */
+ uint16_t r = ahd_inb(ahd, port+1) << 8;
+ return r | ahd_inb(ahd, port);
+}
+
+void
+ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
+{
+ /*
+ * Write the low byte first to accommodate registers
+ * such as PRGMCNT where the order matters.
+ */
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+}
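+
+/*
+ * Illustrative only: writing the 16-bit value 0x1234 with ahd_outw()
+ * emits 0x34 to "port" before 0x12 to "port+1", matching the ordering
+ * that registers like PRGMCNT require; ahd_inw() reverses this and
+ * reads the high byte first because some registers increment or have
+ * other side effects when their low byte is read.
+ */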
+
+uint32_t
+ahd_inl(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24));
+}
+
+void
+ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
+{
+ ahd_outb(ahd, port, (value) & 0xFF);
+ ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahd_inq(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24)
+ | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
+ | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
+ | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
+ | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
+}
+
+void
+ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
+{
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
+ ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
+ ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
+ ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
+ ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
+}
+
+u_int
+ahd_get_scbptr(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
+}
+
+void
+ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
+ ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hnscb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inw_atomic(ahd, HNSCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outw_atomic(ahd, HNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hescb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inb(ahd, HESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outb(ahd, HESCB_QOFF, value);
+}
+
+static u_int
+ahd_get_snscb_qoff(struct ahd_softc *ahd)
+{
+ u_int oldvalue;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ oldvalue = ahd_inw(ahd, SNSCB_QOFF);
+ ahd_outw(ahd, SNSCB_QOFF, oldvalue);
+ return (oldvalue);
+}
+
+static void
+ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outw(ahd, SNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sescb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SESCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sdscb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
+}
+#endif
+
+static void
+ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
+ ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
+}
+
+u_int
+ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ u_int value;
+
+ /*
+ * Work around a PCI-X Rev A. hardware bug.
+ * After a host read of SCB memory, the chip
+ * may become confused into thinking prefetch
+ * was required. This starts the discard timer
+ * running and can cause an unexpected discard
+ * timer interrupt. The workaround is to read
+ * a normal register prior to the exhaustion of
+ * the discard timer. The mode pointer register
+ * has no side effects and so serves well for
+ * this purpose.
+ *
+ * Razor #528
+ */
+ value = ahd_inb(ahd, offset);
+ if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
+ ahd_inb(ahd, MODE_PTR);
+ return (value);
+}
+
+u_int
+ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inb_scbram(ahd, offset)
+ | (ahd_inb_scbram(ahd, offset+1) << 8));
+}
+
+static uint32_t
+ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inw_scbram(ahd, offset)
+ | (ahd_inw_scbram(ahd, offset+2) << 16));
+}
+
+static uint64_t
+ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inl_scbram(ahd, offset)
+ | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
+}
+
+struct scb *
+ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
+{
+ struct scb* scb;
+
+ if (tag >= AHD_SCB_MAX)
+ return (NULL);
+ scb = ahd->scb_data.scbindex[tag];
+ if (scb != NULL)
+ ahd_sync_scb(ahd, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
+
+static void
+ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ struct map_node *q_hscb_map;
+ uint32_t saved_hscb_busaddr;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB (by address) to download,
+ * and we can't disappoint it. To achieve this, the next
+ * HSCB to download is saved off in ahd->next_queued_hscb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahd->next_queued_hscb;
+ q_hscb_map = ahd->next_queued_hscb_map;
+ saved_hscb_busaddr = q_hscb->hscb_busaddr;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ q_hscb->hscb_busaddr = saved_hscb_busaddr;
+ q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+
+ /* Now swap HSCB pointers. */
+ ahd->next_queued_hscb = scb->hscb;
+ ahd->next_queued_hscb_map = scb->hscb_map;
+ scb->hscb = q_hscb;
+ scb->hscb_map = q_hscb_map;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ ahd_swap_with_next_hscb(ahd, scb);
+
+ if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+ panic("Attempt to queue invalid SCB tag %x\n",
+ SCB_GET_TAG(scb));
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+ ahd->qinfifonext++;
+
+ if (scb->sg_count != 0)
+ ahd_setup_data_scb(ahd, scb);
+ else
+ ahd_setup_noxfer_scb(ahd, scb);
+ ahd_setup_scb_common(ahd, scb);
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+ uint64_t host_dataptr;
+
+ host_dataptr = ahd_le64toh(scb->hscb->dataptr);
+ printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+ ahd_name(ahd),
+ SCB_GET_TAG(scb), scb->hscb->scsiid,
+ ahd_le32toh(scb->hscb->hscb_busaddr),
+ (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
+ (u_int)(host_dataptr & 0xFFFFFFFF),
+ ahd_le32toh(scb->hscb->datacnt));
+ }
+#endif
+ /* Tell the adapter about the newly queued SCB */
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/0,
+ /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
+}
+
+static void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, 0),
+ sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+ u_int retval;
+
+ retval = 0;
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
+ /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
+ if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
+ == ahd->qoutfifonext_valid_tag)
+ retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0
+ && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+ retval |= AHD_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahd_intr(struct ahd_softc *ahd)
+{
+ u_int intstat;
+
+ if ((ahd->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
+ && (ahd_check_cmdcmpltqueues(ahd) != 0))
+ intstat = CMDCMPLT;
+ else
+ intstat = ahd_inb(ahd, INTSTAT);
+
+ if ((intstat & INT_PEND) == 0)
+ return (0);
+
+ if (intstat & CMDCMPLT) {
+ ahd_outb(ahd, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ if (ahd_is_paused(ahd)) {
+ /*
+ * Potentially lost SEQINT.
+ * If SEQINTCODE is non-zero,
+ * simulate the SEQINT.
+ */
+ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
+ intstat |= SEQINT;
+ }
+ } else {
+ ahd_flush_device_writes(ahd);
+ }
+ ahd_run_qoutfifo(ahd);
+ ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
+ ahd->cmdcmplt_total++;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0)
+ ahd_run_tqinfifo(ahd, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & HWERRINT) {
+ ahd_handle_hwerrint(ahd);
+ } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
+ ahd->bus_intr(ahd);
+ } else {
+
+ if ((intstat & SEQINT) != 0)
+ ahd_handle_seqint(ahd, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahd_handle_scsiint(ahd, intstat);
+ }
+ return (1);
+}
+
+/******************************** Private Inlines *****************************/
static __inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
@@ -280,7 +1046,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
* are currently in a packetized transfer. We could
* just as easily be sending or receiving a message.
*/
-static __inline int
+static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
ahd_mode_state saved_modes;
@@ -896,7 +1662,7 @@ clrchn:
* a copy of the first byte (little endian) of the sgptr
* hscb field.
*/
-void
+static void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
struct ahd_completion *completion;
@@ -935,7 +1701,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
}
/************************* Interrupt Handling *********************************/
-void
+static void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
/*
@@ -1009,7 +1775,7 @@ ahd_dump_sglist(struct scb *scb)
}
#endif /* AHD_DEBUG */
-void
+static void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
u_int seqintcode;
@@ -1621,7 +2387,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd_unpause(ahd);
}
-void
+static void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
struct scb *scb;
@@ -3571,11 +4337,11 @@ ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
devinfo->target, devinfo->lun);
}
-static struct ahd_phase_table_entry*
+static const struct ahd_phase_table_entry*
ahd_lookup_phase_entry(int phase)
{
- struct ahd_phase_table_entry *entry;
- struct ahd_phase_table_entry *last_entry;
+ const struct ahd_phase_table_entry *entry;
+ const struct ahd_phase_table_entry *last_entry;
/*
* num_phases doesn't include the default entry which
@@ -3941,7 +4707,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
*/
static void
ahd_handle_message_phase(struct ahd_softc *ahd)
-{
+{
struct ahd_devinfo devinfo;
u_int bus_phase;
int end_session;
@@ -5983,8 +6749,7 @@ found:
*/
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
-{
-
+{
/* Clean up for the next user */
scb->flags = SCB_FLAG_NONE;
scb->hscb->control = 0;
@@ -6272,6 +7037,24 @@ static const char *termstat_strings[] = {
"Not Configured"
};
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);
+
+static void
+ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+{
+ struct ahd_softc *ahd;
+
+ ahd = (struct ahd_softc *)arg;
+ del_timer(timer);
+ timer->data = (u_long)arg;
+ timer->expires = jiffies + (usec * HZ)/1000000;
+ timer->function = (ahd_linux_callback_t*)func;
+ add_timer(timer);
+}
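A hedged usage sketch of the calling convention established above; the stat_timer and ahd_stat_timer names follow the driver's existing statistics machinery and are assumptions here, not part of the hunk:

	/*
	 * Re-arm a one-shot timer 'usec' microseconds from now.  The
	 * softc serves both as the callback argument and as timer->data.
	 */
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);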
+
/*
* Start the board, ready for normal operation
*/
@@ -7370,7 +8153,7 @@ ahd_qinfifo_count(struct ahd_softc *ahd)
+ ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
}
-void
+static void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
struct scb *scb;
@@ -8571,7 +9354,7 @@ ahd_loadseq(struct ahd_softc *ahd)
struct cs cs_table[num_critical_sections];
u_int begin_set[num_critical_sections];
u_int end_set[num_critical_sections];
- struct patch *cur_patch;
+ const struct patch *cur_patch;
u_int cs_count;
u_int cur_cs;
u_int i;
@@ -8726,11 +9509,11 @@ ahd_loadseq(struct ahd_softc *ahd)
}
static int
-ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
+ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
u_int start_instr, u_int *skip_addr)
{
- struct patch *cur_patch;
- struct patch *last_patch;
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
u_int num_patches;
num_patches = ARRAY_SIZE(patches);
@@ -8764,7 +9547,7 @@ ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
static u_int
ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
{
- struct patch *cur_patch;
+ const struct patch *cur_patch;
int address_offset;
u_int skip_addr;
u_int i;
@@ -8895,7 +9678,7 @@ sized:
}
int
-ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
+ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
const char *name, u_int address, u_int value,
u_int *cur_column, u_int wrap_point)
{
@@ -9886,7 +10669,7 @@ ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
#endif
}
-void
+static void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 45e55575a0fa..5f12cf9d99d0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -63,18 +63,15 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
ahd_mode_state state,
ahd_mode *src, ahd_mode *dst);
-static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
- ahd_mode dst);
-static __inline void ahd_update_modes(struct ahd_softc *ahd);
-static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
- ahd_mode dstmode, const char *file,
- int line);
-static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
-static __inline void ahd_restore_modes(struct ahd_softc *ahd,
- ahd_mode_state state);
-static __inline int ahd_is_paused(struct ahd_softc *ahd);
-static __inline void ahd_pause(struct ahd_softc *ahd);
-static __inline void ahd_unpause(struct ahd_softc *ahd);
+
+void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+ ahd_mode dst);
+ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+void ahd_restore_modes(struct ahd_softc *ahd,
+ ahd_mode_state state);
+int ahd_is_paused(struct ahd_softc *ahd);
+void ahd_pause(struct ahd_softc *ahd);
+void ahd_unpause(struct ahd_softc *ahd);
static __inline void
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +96,16 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
}
-static __inline void
-ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
-{
- if (ahd->src_mode == src && ahd->dst_mode == dst)
- return;
-#ifdef AHD_DEBUG
- if (ahd->src_mode == AHD_MODE_UNKNOWN
- || ahd->dst_mode == AHD_MODE_UNKNOWN)
- panic("Setting mode prior to saving it.\n");
- if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
- printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
- ahd_build_mode_state(ahd, src, dst));
-#endif
- ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
- ahd->src_mode = src;
- ahd->dst_mode = dst;
-}
-
-static __inline void
-ahd_update_modes(struct ahd_softc *ahd)
-{
- ahd_mode_state mode_ptr;
- ahd_mode src;
- ahd_mode dst;
-
- mode_ptr = ahd_inb(ahd, MODE_PTR);
-#ifdef AHD_DEBUG
- if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
- printf("Reading mode 0x%x\n", mode_ptr);
-#endif
- ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
- ahd_known_modes(ahd, src, dst);
-}
-
-static __inline void
-ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
- ahd_mode dstmode, const char *file, int line)
-{
-#ifdef AHD_DEBUG
- if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
- || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
- panic("%s:%s:%d: Mode assertion failed.\n",
- ahd_name(ahd), file, line);
- }
-#endif
-}
-
-static __inline ahd_mode_state
-ahd_save_modes(struct ahd_softc *ahd)
-{
- if (ahd->src_mode == AHD_MODE_UNKNOWN
- || ahd->dst_mode == AHD_MODE_UNKNOWN)
- ahd_update_modes(ahd);
-
- return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
-}
-
-static __inline void
-ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
-{
- ahd_mode src;
- ahd_mode dst;
-
- ahd_extract_mode_state(ahd, state, &src, &dst);
- ahd_set_modes(ahd, src, dst);
-}
-
-#define AHD_ASSERT_MODES(ahd, source, dest) \
- ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahd_is_paused(struct ahd_softc *ahd)
-{
- return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop. The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahd_pause(struct ahd_softc *ahd)
-{
- ahd_outb(ahd, HCNTRL, ahd->pause);
-
- /*
- * Since the sequencer can disable pausing in a critical section, we
- * must loop until it actually stops.
- */
- while (ahd_is_paused(ahd) == 0)
- ;
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted. If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahd_unpause(struct ahd_softc *ahd)
-{
- /*
- * Automatically restore our modes to those saved
- * prior to the first change of the mode.
- */
- if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
- && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
- if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
- ahd_reset_cmds_pending(ahd);
- ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
- }
-
- if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
- ahd_outb(ahd, HCNTRL, ahd->unpause);
-
- ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
-}
-
/*********************** Scatter Gather List Handling *************************/
-static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
- void *sgptr, dma_addr_t addr,
- bus_size_t len, int last);
-static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
- struct scb *scb);
-
-static __inline void *
-ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
- void *sgptr, dma_addr_t addr, bus_size_t len, int last)
-{
- scb->sg_count++;
- if (sizeof(dma_addr_t) > 4
- && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
- struct ahd_dma64_seg *sg;
-
- sg = (struct ahd_dma64_seg *)sgptr;
- sg->addr = ahd_htole64(addr);
- sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
- return (sg + 1);
- } else {
- struct ahd_dma_seg *sg;
-
- sg = (struct ahd_dma_seg *)sgptr;
- sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
- sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
- | (last ? AHD_DMA_LAST_SEG : 0));
- return (sg + 1);
- }
-}
-
-static __inline void
-ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
-{
- /* XXX Handle target mode SCBs. */
- scb->crc_retry_count = 0;
- if ((scb->flags & SCB_PACKETIZED) != 0) {
- /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
- scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
- } else {
- if (ahd_get_transfer_length(scb) & 0x01)
- scb->hscb->task_attribute = SCB_XFERLEN_ODD;
- else
- scb->hscb->task_attribute = 0;
- }
-
- if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
- || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
- scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
- ahd_htole32(scb->sense_busaddr);
-}
-
-static __inline void
-ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- /*
-	 * Copy the first SG into the "current" data pointer area.
- */
- if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
- struct ahd_dma64_seg *sg;
-
- sg = (struct ahd_dma64_seg *)scb->sg_list;
- scb->hscb->dataptr = sg->addr;
- scb->hscb->datacnt = sg->len;
- } else {
- struct ahd_dma_seg *sg;
- uint32_t *dataptr_words;
-
- sg = (struct ahd_dma_seg *)scb->sg_list;
- dataptr_words = (uint32_t*)&scb->hscb->dataptr;
- dataptr_words[0] = sg->addr;
- dataptr_words[1] = 0;
- if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
- uint64_t high_addr;
-
- high_addr = ahd_le32toh(sg->len) & 0x7F000000;
- scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
- }
- scb->hscb->datacnt = sg->len;
- }
- /*
- * Note where to find the SG entries in bus space.
- * We also set the full residual flag which the
- * sequencer will clear as soon as a data transfer
- * occurs.
- */
- scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
-}
-
-static __inline void
-ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
- scb->hscb->dataptr = 0;
- scb->hscb->datacnt = 0;
-}
+void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr,
+ bus_size_t len, int last);
/************************** Memory mapping routines ***************************/
static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
-static __inline void *
- ahd_sg_bus_to_virt(struct ahd_softc *ahd,
- struct scb *scb,
- uint32_t sg_busaddr);
-static __inline uint32_t
- ahd_sg_virt_to_bus(struct ahd_softc *ahd,
- struct scb *scb,
- void *sg);
-static __inline void ahd_sync_scb(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline void ahd_sync_sense(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline uint32_t
- ahd_targetcmd_offset(struct ahd_softc *ahd,
- u_int index);
+
+void ahd_sync_sglist(struct ahd_softc *ahd,
+ struct scb *scb, int op);
static __inline size_t
ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +115,32 @@ ahd_sg_size(struct ahd_softc *ahd)
return (sizeof(struct ahd_dma_seg));
}
-static __inline void *
-ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
-{
- dma_addr_t sg_offset;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
- return ((uint8_t *)scb->sg_list + sg_offset);
-}
-
-static __inline uint32_t
-ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
-{
- dma_addr_t sg_offset;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
- - ahd_sg_size(ahd);
-
- return (scb->sg_list_busaddr + sg_offset);
-}
-
-static __inline void
-ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
- scb->hscb_map->dmamap,
- /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
- /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- if (scb->sg_count == 0)
- return;
-
- ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
- scb->sg_map->dmamap,
- /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
- /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
-}
-
-static __inline void
-ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
- scb->sense_map->dmamap,
- /*offset*/scb->sense_busaddr,
- /*len*/AHD_SENSE_BUFSIZE, op);
-}
-
-static __inline uint32_t
-ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
-{
- return (((uint8_t *)&ahd->targetcmds[index])
- - (uint8_t *)ahd->qoutfifo);
-}
-
/*********************** Miscellaneous Support Functions ***********************/
-static __inline struct ahd_initiator_tinfo *
- ahd_fetch_transinfo(struct ahd_softc *ahd,
- char channel, u_int our_id,
- u_int remote_id,
- struct ahd_tmode_tstate **tstate);
-static __inline uint16_t
- ahd_inw(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outw(struct ahd_softc *ahd, u_int port,
- u_int value);
-static __inline uint32_t
- ahd_inl(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outl(struct ahd_softc *ahd, u_int port,
- uint32_t value);
-static __inline uint64_t
- ahd_inq(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outq(struct ahd_softc *ahd, u_int port,
- uint64_t value);
-static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd);
-static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
-static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint32_t
- ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint64_t
- ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+struct ahd_initiator_tinfo *
+ ahd_fetch_transinfo(struct ahd_softc *ahd,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahd_tmode_tstate **tstate);
+uint16_t
+ ahd_inw(struct ahd_softc *ahd, u_int port);
+void ahd_outw(struct ahd_softc *ahd, u_int port,
+ u_int value);
+uint32_t
+ ahd_inl(struct ahd_softc *ahd, u_int port);
+void ahd_outl(struct ahd_softc *ahd, u_int port,
+ uint32_t value);
+uint64_t
+ ahd_inq(struct ahd_softc *ahd, u_int port);
+void ahd_outq(struct ahd_softc *ahd, u_int port,
+ uint64_t value);
+u_int ahd_get_scbptr(struct ahd_softc *ahd);
+void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+struct scb *
+ ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
+void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd,
struct scb *scb);
@@ -463,25 +148,7 @@ static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd,
struct scb *scb);
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahd_initiator_tinfo *
-ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
- u_int remote_id, struct ahd_tmode_tstate **tstate)
-{
- /*
- * Transfer data structures are stored from the perspective
- * of the target role. Since the parameters for a connection
- * in the initiator role to a given target are the same as
- * when the roles are reversed, we pretend we are the target.
- */
- if (channel == 'B')
- our_id += 8;
- *tstate = ahd->enabled_targets[our_id];
- return (&(*tstate)->transinfo[remote_id]);
-}
+#if 0 /* unused */
#define AHD_COPY_COL_IDX(dst, src) \
do { \
@@ -489,304 +156,7 @@ do { \
dst->hscb->lun = src->hscb->lun; \
} while (0)
-static __inline uint16_t
-ahd_inw(struct ahd_softc *ahd, u_int port)
-{
- /*
- * Read high byte first as some registers increment
- * or have other side effects when the low byte is
- * read.
- */
- uint16_t r = ahd_inb(ahd, port+1) << 8;
- return r | ahd_inb(ahd, port);
-}
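The ordering rule described in the comment above is worth a worked example (a sketch, not part of the patch): some registers latch or advance internal state when their low byte is accessed, so a 16-bit read touches the high byte first, while a 16-bit write (see ahd_outw below) touches the low byte first. Reversing either order can yield a torn value.

	/* safe 16-bit read of a side-effecting register pair */
	uint16_t v = (ahd_inb(ahd, port + 1) << 8) | ahd_inb(ahd, port);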
-
-static __inline void
-ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
-{
- /*
-	 * Write low byte first to accommodate registers
-	 * such as PRGMCNT where the order matters.
- */
- ahd_outb(ahd, port, value & 0xFF);
- ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahd_inl(struct ahd_softc *ahd, u_int port)
-{
- return ((ahd_inb(ahd, port))
- | (ahd_inb(ahd, port+1) << 8)
- | (ahd_inb(ahd, port+2) << 16)
- | (ahd_inb(ahd, port+3) << 24));
-}
-
-static __inline void
-ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
-{
- ahd_outb(ahd, port, (value) & 0xFF);
- ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
- ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
- ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahd_inq(struct ahd_softc *ahd, u_int port)
-{
- return ((ahd_inb(ahd, port))
- | (ahd_inb(ahd, port+1) << 8)
- | (ahd_inb(ahd, port+2) << 16)
- | (ahd_inb(ahd, port+3) << 24)
- | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
- | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
- | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
- | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
-}
-
-static __inline void
-ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
-{
- ahd_outb(ahd, port, value & 0xFF);
- ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
- ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
- ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
- ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
- ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
- ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
- ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_scbptr(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
- ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
- return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
-}
-
-static __inline void
-ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
-{
- AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
- ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
- ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
- ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_hnscb_qoff(struct ahd_softc *ahd)
-{
- return (ahd_inw_atomic(ahd, HNSCB_QOFF));
-}
-
-static __inline void
-ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- ahd_outw_atomic(ahd, HNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_hescb_qoff(struct ahd_softc *ahd)
-{
- return (ahd_inb(ahd, HESCB_QOFF));
-}
-
-static __inline void
-ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
-{
- ahd_outb(ahd, HESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_snscb_qoff(struct ahd_softc *ahd)
-{
- u_int oldvalue;
-
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- oldvalue = ahd_inw(ahd, SNSCB_QOFF);
- ahd_outw(ahd, SNSCB_QOFF, oldvalue);
- return (oldvalue);
-}
-
-static __inline void
-ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outw(ahd, SNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sescb_qoff(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- return (ahd_inb(ahd, SESCB_QOFF));
-}
-
-static __inline void
-ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outb(ahd, SESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sdscb_qoff(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
-}
-
-static __inline void
-ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
- ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
-{
- u_int value;
-
- /*
- * Workaround PCI-X Rev A. hardware bug.
- * After a host read of SCB memory, the chip
- * may become confused into thinking prefetch
- * was required. This starts the discard timer
- * running and can cause an unexpected discard
- * timer interrupt. The work around is to read
- * a normal register prior to the exhaustion of
- * the discard timer. The mode pointer register
- * has no side effects and so serves well for
- * this purpose.
- *
- * Razor #528
- */
- value = ahd_inb(ahd, offset);
- if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
- ahd_inb(ahd, MODE_PTR);
- return (value);
-}
-
-static __inline u_int
-ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inb_scbram(ahd, offset)
- | (ahd_inb_scbram(ahd, offset+1) << 8));
-}
-
-static __inline uint32_t
-ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inw_scbram(ahd, offset)
- | (ahd_inw_scbram(ahd, offset+2) << 16));
-}
-
-static __inline uint64_t
-ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inl_scbram(ahd, offset)
- | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
-}
-
-static __inline struct scb *
-ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
-{
- struct scb* scb;
-
- if (tag >= AHD_SCB_MAX)
- return (NULL);
- scb = ahd->scb_data.scbindex[tag];
- if (scb != NULL)
- ahd_sync_scb(ahd, scb,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- return (scb);
-}
-
-static __inline void
-ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
-{
- struct hardware_scb *q_hscb;
- struct map_node *q_hscb_map;
- uint32_t saved_hscb_busaddr;
-
- /*
- * Our queuing method is a bit tricky. The card
- * knows in advance which HSCB (by address) to download,
- * and we can't disappoint it. To achieve this, the next
- * HSCB to download is saved off in ahd->next_queued_hscb.
- * When we are called to queue "an arbitrary scb",
- * we copy the contents of the incoming HSCB to the one
- * the sequencer knows about, swap HSCB pointers and
- * finally assign the SCB to the tag indexed location
- * in the scb_array. This makes sure that we can still
- * locate the correct SCB by SCB_TAG.
- */
- q_hscb = ahd->next_queued_hscb;
- q_hscb_map = ahd->next_queued_hscb_map;
- saved_hscb_busaddr = q_hscb->hscb_busaddr;
- memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
- q_hscb->hscb_busaddr = saved_hscb_busaddr;
- q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
-
- /* Now swap HSCB pointers. */
- ahd->next_queued_hscb = scb->hscb;
- ahd->next_queued_hscb_map = scb->hscb_map;
- scb->hscb = q_hscb;
- scb->hscb_map = q_hscb_map;
-
- /* Now define the mapping from tag to SCB in the scbindex */
- ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- ahd_swap_with_next_hscb(ahd, scb);
-
- if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
- panic("Attempt to queue invalid SCB tag %x\n",
- SCB_GET_TAG(scb));
-
- /*
- * Keep a history of SCBs we've downloaded in the qinfifo.
- */
- ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
- ahd->qinfifonext++;
-
- if (scb->sg_count != 0)
- ahd_setup_data_scb(ahd, scb);
- else
- ahd_setup_noxfer_scb(ahd, scb);
- ahd_setup_scb_common(ahd, scb);
-
- /*
- * Make sure our data is consistent from the
- * perspective of the adapter.
- */
- ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
-#ifdef AHD_DEBUG
- if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
- uint64_t host_dataptr;
-
- host_dataptr = ahd_le64toh(scb->hscb->dataptr);
- printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
- ahd_name(ahd),
- SCB_GET_TAG(scb), scb->hscb->scsiid,
- ahd_le32toh(scb->hscb->hscb_busaddr),
- (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
- (u_int)(host_dataptr & 0xFFFFFFFF),
- ahd_le32toh(scb->hscb->datacnt));
- }
#endif
- /* Tell the adapter about the newly queued SCB */
- ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
-}
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +171,6 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
}
/************************** Interrupt Processing ******************************/
-static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
-static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
-static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
-static __inline int ahd_intr(struct ahd_softc *ahd);
-
-static __inline void
-ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
-{
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
- /*offset*/0,
- /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
-}
-
-static __inline void
-ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
-{
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0) {
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
- ahd->shared_data_map.dmamap,
- ahd_targetcmd_offset(ahd, 0),
- sizeof(struct target_cmd) * AHD_TMODE_CMDS,
- op);
- }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHD_RUN_QOUTFIFO 0x1
-#define AHD_RUN_TQINFIFO 0x2
-static __inline u_int
-ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
-{
- u_int retval;
-
- retval = 0;
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
- /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
- /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
- if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
- == ahd->qoutfifonext_valid_tag)
- retval |= AHD_RUN_QOUTFIFO;
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0
- && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
- ahd->shared_data_map.dmamap,
- ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
- /*len*/sizeof(struct target_cmd),
- BUS_DMASYNC_POSTREAD);
- if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
- retval |= AHD_RUN_TQINFIFO;
- }
-#endif
- return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahd_intr(struct ahd_softc *ahd)
-{
- u_int intstat;
-
- if ((ahd->pause & INTEN) == 0) {
- /*
- * Our interrupt is not enabled on the chip
- * and may be disabled for re-entrancy reasons,
- * so just return. This is likely just a shared
- * interrupt.
- */
- return (0);
- }
-
- /*
- * Instead of directly reading the interrupt status register,
- * infer the cause of the interrupt by checking our in-core
- * completion queues. This avoids a costly PCI bus read in
- * most cases.
- */
- if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
- && (ahd_check_cmdcmpltqueues(ahd) != 0))
- intstat = CMDCMPLT;
- else
- intstat = ahd_inb(ahd, INTSTAT);
-
- if ((intstat & INT_PEND) == 0)
- return (0);
-
- if (intstat & CMDCMPLT) {
- ahd_outb(ahd, CLRINT, CLRCMDINT);
-
- /*
- * Ensure that the chip sees that we've cleared
- * this interrupt before we walk the output fifo.
- * Otherwise, we may, due to posted bus writes,
- * clear the interrupt after we finish the scan,
- * and after the sequencer has added new entries
- * and asserted the interrupt again.
- */
- if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
- if (ahd_is_paused(ahd)) {
- /*
- * Potentially lost SEQINT.
- * If SEQINTCODE is non-zero,
- * simulate the SEQINT.
- */
- if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
- intstat |= SEQINT;
- }
- } else {
- ahd_flush_device_writes(ahd);
- }
- ahd_run_qoutfifo(ahd);
- ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
- ahd->cmdcmplt_total++;
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0)
- ahd_run_tqinfifo(ahd, /*paused*/FALSE);
-#endif
- }
-
- /*
- * Handle statuses that may invalidate our cached
- * copy of INTSTAT separately.
- */
- if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
- /* Hot eject. Do nothing */
- } else if (intstat & HWERRINT) {
- ahd_handle_hwerrint(ahd);
- } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
- ahd->bus_intr(ahd);
- } else {
-
- if ((intstat & SEQINT) != 0)
- ahd_handle_seqint(ahd, intstat);
-
- if ((intstat & SCSIINT) != 0)
- ahd_handle_scsiint(ahd, intstat);
- }
- return (1);
-}
+int ahd_intr(struct ahd_softc *ahd);
#endif /* _AIC79XX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0081aa357c8b..0f829b3b8ab7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -193,7 +193,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
@@ -369,10 +369,167 @@ static void ahd_release_simq(struct ahd_softc *ahd);
static int ahd_linux_unit;
+/************************** OS Utility Wrappers *******************************/
+void ahd_delay(long);
+void
+ahd_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+ udelay(usec % 1024);
+ usec -= 1024;
+ }
+}
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
+ long port, uint16_t val);
+void ahd_outsb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+void ahd_insb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+
+uint8_t
+ahd_inb(struct ahd_softc * ahd, long port)
+{
+ uint8_t x;
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readb(ahd->bshs[0].maddr + port);
+ } else {
+ x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+
+#if 0 /* unused */
+static uint16_t
+ahd_inw_atomic(struct ahd_softc * ahd, long port)
+{
+	uint16_t x;	/* wide enough for readw()/inw(); uint8_t would truncate */
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readw(ahd->bshs[0].maddr + port);
+ } else {
+ x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+#endif
+
+void
+ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writeb(val, ahd->bshs[0].maddr + port);
+ } else {
+ outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writew(val, ahd->bshs[0].maddr + port);
+ } else {
+ outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahd_outb(ahd, port, *array++);
+}
+
+void
+ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahd_inb(ahd, port);
+}
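The PIO fall-back in these accessors splits the flat register offset across the I/O BARs reserved at map time: bits 8 and up select the BAR, and the low byte is the offset within it. A worked example, purely illustrative:

	/* port 0x143 on a PIO-mapped controller: */
	x = inb(ahd->bshs[0x143 >> 8].ioport + (0x143 & 0xFF));
	/* equivalent to: inb(ahd->bshs[1].ioport + 0x43) */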
+
+/******************************* PCI Routines *********************************/
+uint32_t
+ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahd_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahd_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
+
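These wrappers keep the BSD-style width-dispatched interface on top of the Linux pci_*_config helpers. Typical call sites, modeled on ones appearing later in this patch:

	uint32_t command;

	command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	command |= PCIM_CMD_MEMEN;
	ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/4);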
/****************************** Inlines ***************************************/
-static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
+static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
-static __inline void
+static void
ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
{
struct scsi_cmnd *cmd;
@@ -400,13 +557,11 @@ ahd_linux_info(struct Scsi_Host *host)
bp = &buffer[0];
ahd = *(struct ahd_softc **)host->hostdata;
memset(bp, 0, sizeof(buffer));
- strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
- strcat(bp, AIC79XX_DRIVER_VERSION);
- strcat(bp, "\n");
- strcat(bp, " <");
+ strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
+ " <");
strcat(bp, ahd->description);
- strcat(bp, ">\n");
- strcat(bp, " ");
+ strcat(bp, ">\n"
+ " ");
ahd_controller_info(ahd, ahd_info);
strcat(bp, ahd_info);
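The rewrite relies on C's adjacent-string-literal concatenation: the banner, AIC79XX_DRIVER_VERSION (itself a string literal), and the separators are merged into a single literal at compile time, replacing the former strcpy()/strcat() chain. Reduced to its essentials, with a hypothetical stand-in macro:

	#define REV "3.0"	/* stand-in for AIC79XX_DRIVER_VERSION */
	/* The compiler sees one literal: "Driver, Rev 3.0\n <" */
	strcpy(bp, "Driver, Rev " REV "\n"
		   " <");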
@@ -432,7 +587,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
return rtn;
}
-static inline struct scsi_target **
+static struct scsi_target **
ahd_linux_target_in_softc(struct scsi_target *starget)
{
struct ahd_softc *ahd =
@@ -991,7 +1146,7 @@ aic79xx_setup(char *s)
char *p;
char *end;
- static struct {
+ static const struct {
const char *name;
uint32_t *flag;
} options[] = {
@@ -1223,7 +1378,7 @@ ahd_platform_init(struct ahd_softc *ahd)
* Lookup and commit any modified IO Cell options.
*/
if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
- struct ahd_linux_iocell_opts *iocell_opts;
+ const struct ahd_linux_iocell_opts *iocell_opts;
iocell_opts = &aic79xx_iocell_info[ahd->unit];
if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
@@ -2613,7 +2768,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
uint8_t precomp;
if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
- struct ahd_linux_iocell_opts *iocell_opts;
+ const struct ahd_linux_iocell_opts *iocell_opts;
iocell_opts = &aic79xx_iocell_info[ahd->unit];
precomp = iocell_opts->precomp;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 853998be1474..8d6612c19922 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
/***************************** Timer Facilities *******************************/
#define ahd_timer_init init_timer
#define ahd_timer_stop del_timer_sync
-typedef void ahd_linux_callback_t (u_long);
-static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
- ahd_callback_t *func, void *arg);
-
-static __inline void
-ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
-{
- struct ahd_softc *ahd;
-
- ahd = (struct ahd_softc *)arg;
- del_timer(timer);
- timer->data = (u_long)arg;
- timer->expires = jiffies + (usec * HZ)/1000000;
- timer->function = (ahd_linux_callback_t*)func;
- add_timer(timer);
-}
/***************************** SMP support ************************************/
#include <linux/spinlock.h>
@@ -376,7 +360,7 @@ struct ahd_platform_data {
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- uint32_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
/************************** OS Utility Wrappers *******************************/
@@ -386,111 +370,18 @@ struct ahd_platform_data {
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)
-static __inline void ahd_delay(long);
-static __inline void
-ahd_delay(long usec)
-{
- /*
- * udelay on Linux can have problems for
- * multi-millisecond waits. Wait at most
- * 1024us per call.
- */
- while (usec > 0) {
- udelay(usec % 1024);
- usec -= 1024;
- }
-}
-
+void ahd_delay(long);
/***************************** Low Level I/O **********************************/
-static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
-static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
-static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
-static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
long port, uint16_t val);
-static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
+void ahd_outsb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
-static __inline void ahd_insb(struct ahd_softc * ahd, long port,
+void ahd_insb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
-static __inline uint8_t
-ahd_inb(struct ahd_softc * ahd, long port)
-{
- uint8_t x;
-
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- x = readb(ahd->bshs[0].maddr + port);
- } else {
- x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
- }
- mb();
- return (x);
-}
-
-static __inline uint16_t
-ahd_inw_atomic(struct ahd_softc * ahd, long port)
-{
- uint8_t x;
-
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- x = readw(ahd->bshs[0].maddr + port);
- } else {
- x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
- }
- mb();
- return (x);
-}
-
-static __inline void
-ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
-{
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- writeb(val, ahd->bshs[0].maddr + port);
- } else {
- outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
- }
- mb();
-}
-
-static __inline void
-ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
-{
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- writew(val, ahd->bshs[0].maddr + port);
- } else {
- outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
- }
- mb();
-}
-
-static __inline void
-ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- ahd_outb(ahd, port, *array++);
-}
-
-static __inline void
-ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- *array++ = ahd_inb(ahd, port);
-}
-
/**************************** Initialization **********************************/
int ahd_linux_register_host(struct ahd_softc *,
struct scsi_host_template *);
@@ -593,62 +484,12 @@ void ahd_linux_pci_exit(void);
int ahd_pci_map_registers(struct ahd_softc *ahd);
int ahd_pci_map_int(struct ahd_softc *ahd);
-static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
+uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
int reg, int width);
-
-static __inline uint32_t
-ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
-{
- switch (width) {
- case 1:
- {
- uint8_t retval;
-
- pci_read_config_byte(pci, reg, &retval);
- return (retval);
- }
- case 2:
- {
- uint16_t retval;
- pci_read_config_word(pci, reg, &retval);
- return (retval);
- }
- case 4:
- {
- uint32_t retval;
- pci_read_config_dword(pci, reg, &retval);
- return (retval);
- }
- default:
- panic("ahd_pci_read_config: Read size too big");
- /* NOTREACHED */
- return (0);
- }
-}
-
-static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
+void ahd_pci_write_config(ahd_dev_softc_t pci,
int reg, uint32_t value,
int width);
-static __inline void
-ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
-{
- switch (width) {
- case 1:
- pci_write_config_byte(pci, reg, value);
- break;
- case 2:
- pci_write_config_word(pci, reg, value);
- break;
- case 4:
- pci_write_config_dword(pci, reg, value);
- break;
- default:
- panic("ahd_pci_write_config: Write size too big");
- /* NOTREACHED */
- }
-}
-
static __inline int ahd_get_pci_function(ahd_dev_softc_t);
static __inline int
ahd_get_pci_function(ahd_dev_softc_t pci)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index dfaaae5e73ae..6593056867f6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -49,7 +49,7 @@
ID2C(x), \
ID2C(IDIROC(x))
-static struct pci_device_id ahd_linux_pci_id_table[] = {
+static const struct pci_device_id ahd_linux_pci_id_table[] = {
/* aic7901 based controllers */
ID(ID_AHA_29320A),
ID(ID_AHA_29320ALP),
@@ -159,7 +159,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char buf[80];
struct ahd_softc *ahd;
ahd_dev_softc_t pci;
- struct ahd_pci_identity *entry;
+ const struct ahd_pci_identity *entry;
char *name;
int error;
struct device *dev = &pdev->dev;
@@ -249,8 +249,8 @@ ahd_linux_pci_exit(void)
}
static int
-ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
- u_long *base2)
+ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
+ resource_size_t *base2)
{
*base = pci_resource_start(ahd->dev_softc, 0);
/*
@@ -272,11 +272,11 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
static int
ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
- u_long *bus_addr,
+ resource_size_t *bus_addr,
uint8_t __iomem **maddr)
{
- u_long start;
- u_long base_page;
+ resource_size_t start;
+ resource_size_t base_page;
u_long base_offset;
int error = 0;
@@ -310,7 +310,7 @@ int
ahd_pci_map_registers(struct ahd_softc *ahd)
{
uint32_t command;
- u_long base;
+ resource_size_t base;
uint8_t __iomem *maddr;
int error;
@@ -346,31 +346,32 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
} else
command |= PCIM_CMD_MEMEN;
} else if (bootverbose) {
- printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx "
+ printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
"unavailable. Cannot memory map device.\n",
ahd_get_pci_bus(ahd->dev_softc),
ahd_get_pci_slot(ahd->dev_softc),
ahd_get_pci_function(ahd->dev_softc),
- base);
+ (unsigned long long)base);
}
if (maddr == NULL) {
- u_long base2;
+ resource_size_t base2;
error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
if (error == 0) {
ahd->tags[0] = BUS_SPACE_PIO;
ahd->tags[1] = BUS_SPACE_PIO;
- ahd->bshs[0].ioport = base;
- ahd->bshs[1].ioport = base2;
+ ahd->bshs[0].ioport = (u_long)base;
+ ahd->bshs[1].ioport = (u_long)base2;
command |= PCIM_CMD_PORTEN;
} else {
- printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx"
- "unavailable. Cannot map device.\n",
+ printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
+ "0x%llx unavailable. Cannot map device.\n",
ahd_get_pci_bus(ahd->dev_softc),
ahd_get_pci_slot(ahd->dev_softc),
ahd_get_pci_function(ahd->dev_softc),
- base, base2);
+ (unsigned long long)base,
+ (unsigned long long)base2);
}
}
ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
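resource_size_t may be 32 or 64 bits wide depending on the kernel configuration, so no single printf length modifier matches it on every build. The idiom used in the hunks above, reduced to its essentials:

	resource_size_t base = pci_resource_start(ahd->dev_softc, 0);

	/* always correct regardless of resource_size_t's width */
	printf("MEM region at 0x%llx\n", (unsigned long long)base);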
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c9f79fdf9131..c25b6adffbf9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -97,7 +97,7 @@ static ahd_device_setup_t ahd_aic7901A_setup;
static ahd_device_setup_t ahd_aic7902_setup;
static ahd_device_setup_t ahd_aic790X_setup;
-static struct ahd_pci_identity ahd_pci_ident_table [] =
+static const struct ahd_pci_identity ahd_pci_ident_table[] =
{
/* aic7901 based controllers */
{
@@ -253,7 +253,7 @@ static void ahd_configure_termination(struct ahd_softc *ahd,
static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
static void ahd_pci_intr(struct ahd_softc *ahd);
-struct ahd_pci_identity *
+const struct ahd_pci_identity *
ahd_find_pci_device(ahd_dev_softc_t pci)
{
uint64_t full_id;
@@ -261,7 +261,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
- struct ahd_pci_identity *entry;
+ const struct ahd_pci_identity *entry;
u_int i;
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -292,7 +292,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
}
int
-ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
+ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{
struct scb_data *shared_scb_data;
u_int command;
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 6b28bebcbca0..014bed716e7c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -57,7 +57,7 @@ static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
-static struct {
+static const struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
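The const-ification here, and on aic79xx_iocell_info, ahd_linux_pci_id_table, and ahd_pci_ident_table above, lets read-only tables live in .rodata and turns accidental writes into compile-time errors. The cost is that every pointer used to walk such a table must be const-qualified as well, which is why ahd_lookup_phase_entry() and ahd_find_pci_device() change type in this patch. A minimal sketch of the pattern, with hypothetical names:

	struct entry { u_int key; u_int val; };

	static const struct entry table[] = {
		{ 1, 10 },
		{ 2, 20 },
	};

	static const struct entry *
	lookup(u_int key)
	{
		const struct entry *e;	/* must be const-qualified too */

		for (e = table; e < &table[ARRAY_SIZE(table)]; e++)
			if (e->key == key)
				return (e);
		return (NULL);
	}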
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index 2068e00d2c75..c21ceab8e913 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -48,13 +48,6 @@ ahd_reg_print_t ahd_error_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrerr_print;
-#else
-#define ahd_clrerr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_hcntrl_print;
#else
#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
@@ -167,13 +160,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_arbctl_print;
-#else
-#define ahd_arbctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sg_cache_pre_print;
#else
#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
@@ -188,20 +174,6 @@ ahd_reg_print_t ahd_lqin_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_typeptr_print;
-#else
-#define ahd_typeptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_tagptr_print;
-#else
-#define ahd_tagptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lunptr_print;
#else
#define ahd_lunptr_print(regvalue, cur_col, wrap) \
@@ -209,20 +181,6 @@ ahd_reg_print_t ahd_lunptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_datalenptr_print;
-#else
-#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_statlenptr_print;
-#else
-#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_cmdlenptr_print;
#else
#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
@@ -258,13 +216,6 @@ ahd_reg_print_t ahd_qnextptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_idptr_print;
-#else
-#define ahd_idptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_abrtbyteptr_print;
#else
#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
@@ -279,27 +230,6 @@ ahd_reg_print_t ahd_abrtbitptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmdbytes_print;
-#else
-#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmd2rcv_print;
-#else
-#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shortthresh_print;
-#else
-#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lunlen_print;
#else
#define ahd_lunlen_print(regvalue, cur_col, wrap) \
@@ -328,41 +258,6 @@ ahd_reg_print_t ahd_maxcmdcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd01_print;
-#else
-#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd16_print;
-#else
-#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd17_print;
-#else
-#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdrsvd0_print;
-#else
-#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl0_print;
-#else
-#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lqctl1_print;
#else
#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
@@ -370,13 +265,6 @@ ahd_reg_print_t ahd_lqctl1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist0_print;
-#else
-#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lqctl2_print;
#else
#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
@@ -384,13 +272,6 @@ ahd_reg_print_t ahd_lqctl2_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist1_print;
-#else
-#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scsiseq0_print;
#else
#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
@@ -412,20 +293,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dlcount_print;
-#else
-#define ahd_dlcount_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_businitid_print;
-#else
-#define ahd_businitid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sxfrctl1_print;
#else
#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
@@ -433,20 +300,6 @@ ahd_reg_print_t ahd_sxfrctl1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_bustargid_print;
-#else
-#define ahd_bustargid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl2_print;
-#else
-#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dffstat_print;
#else
#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -454,17 +307,17 @@ ahd_reg_print_t ahd_dffstat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsisigo_print;
+ahd_reg_print_t ahd_multargid_print;
#else
-#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
+#define ahd_multargid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_multargid_print;
+ahd_reg_print_t ahd_scsisigo_print;
#else
-#define ahd_multargid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
+#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -482,13 +335,6 @@ ahd_reg_print_t ahd_scsiphase_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsidat0_img_print;
-#else
-#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scsidat_print;
#else
#define ahd_scsidat_print(regvalue, cur_col, wrap) \
@@ -531,13 +377,6 @@ ahd_reg_print_t ahd_sblkctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint0_print;
-#else
-#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sstat0_print;
#else
#define ahd_sstat0_print(regvalue, cur_col, wrap) \
@@ -552,10 +391,10 @@ ahd_reg_print_t ahd_simode0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint1_print;
+ahd_reg_print_t ahd_clrsint0_print;
#else
-#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
+#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -566,17 +405,17 @@ ahd_reg_print_t ahd_sstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat2_print;
+ahd_reg_print_t ahd_clrsint1_print;
#else
-#define ahd_sstat2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode2_print;
+ahd_reg_print_t ahd_sstat2_print;
#else
-#define ahd_simode2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -622,17 +461,17 @@ ahd_reg_print_t ahd_lqistat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
+ahd_reg_print_t ahd_lqimode0_print;
#else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
+ahd_reg_print_t ahd_clrlqiint0_print;
#else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -790,13 +629,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
-#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seqimode_print;
#else
#define ahd_seqimode_print(regvalue, cur_col, wrap) \
@@ -804,24 +636,17 @@ ahd_reg_print_t ahd_seqimode_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mdffstat_print;
-#else
-#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_crccontrol_print;
+ahd_reg_print_t ahd_currscb_print;
#else
-#define ahd_crccontrol_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap)
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfftag_print;
+ahd_reg_print_t ahd_mdffstat_print;
#else
-#define ahd_dfftag_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap)
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -832,20 +657,6 @@ ahd_reg_print_t ahd_lastscb_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsitest_print;
-#else
-#define ahd_scsitest_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_iopdnctl_print;
-#else
-#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_shaddr_print;
#else
#define ahd_shaddr_print(regvalue, cur_col, wrap) \
@@ -860,13 +671,6 @@ ahd_reg_print_t ahd_negoaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dgrpcrci_print;
-#else
-#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_negperiod_print;
#else
#define ahd_negperiod_print(regvalue, cur_col, wrap) \
@@ -874,13 +678,6 @@ ahd_reg_print_t ahd_negperiod_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_packcrci_print;
-#else
-#define ahd_packcrci_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_negoffset_print;
#else
#define ahd_negoffset_print(regvalue, cur_col, wrap) \
@@ -930,13 +727,6 @@ ahd_reg_print_t ahd_iownid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl0_print;
-#else
-#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_shcnt_print;
#else
#define ahd_shcnt_print(regvalue, cur_col, wrap) \
@@ -951,27 +741,6 @@ ahd_reg_print_t ahd_townid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl1_print;
-#else
-#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960cnt0_print;
-#else
-#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_xsig_print;
-#else
-#define ahd_xsig_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seloid_print;
#else
#define ahd_seloid_print(regvalue, cur_col, wrap) \
@@ -979,41 +748,6 @@ ahd_reg_print_t ahd_seloid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl0_print;
-#else
-#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_fairness_print;
-#else
-#define ahd_fairness_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl1_print;
-#else
-#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_unfairness_print;
-#else
-#define ahd_unfairness_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400cnt0_print;
-#else
-#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_haddr_print;
#else
#define ahd_haddr_print(regvalue, cur_col, wrap) \
@@ -1021,27 +755,6 @@ ahd_reg_print_t ahd_haddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_plldelay_print;
-#else
-#define ahd_plldelay_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaadr_print;
-#else
-#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmacnt_print;
-#else
-#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_hcnt_print;
#else
#define ahd_hcnt_print(regvalue, cur_col, wrap) \
@@ -1049,10 +762,10 @@ ahd_reg_print_t ahd_hcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaen_print;
+ahd_reg_print_t ahd_sghaddr_print;
#else
-#define ahd_hodmaen_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap)
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1063,10 +776,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
+ahd_reg_print_t ahd_sghcnt_print;
#else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1077,13 +790,6 @@ ahd_reg_print_t ahd_scbhcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
-#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dff_thrsh_print;
#else
#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
@@ -1091,132 +797,6 @@ ahd_reg_print_t ahd_dff_thrsh_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romaddr_print;
-#else
-#define ahd_romaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romcntrl_print;
-#else
-#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romdata_print;
-#else
-#define ahd_romdata_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg0_print;
-#else
-#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_roenable_print;
-#else
-#define ahd_roenable_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg0_print;
-#else
-#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg0_print;
-#else
-#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg1_print;
-#else
-#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_nsenable_print;
-#else
-#define ahd_nsenable_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg1_print;
-#else
-#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg1_print;
-#else
-#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg2_print;
-#else
-#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg2_print;
-#else
-#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ost_print;
-#else
-#define ahd_ost_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg2_print;
-#else
-#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg3_print;
-#else
-#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg3_print;
-#else
-#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg3_print;
-#else
-#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_pcixctl_print;
#else
#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
@@ -1224,34 +804,6 @@ ahd_reg_print_t ahd_pcixctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyseqbcnt_print;
-#else
-#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchseqbcnt_print;
-#else
-#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcseqbcnt_print;
-#else
-#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat0_print;
-#else
-#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dchspltstat0_print;
#else
#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
@@ -1259,27 +811,6 @@ ahd_reg_print_t ahd_dchspltstat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat0_print;
-#else
-#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat1_print;
-#else
-#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat1_print;
-#else
-#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dchspltstat1_print;
#else
#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
@@ -1287,90 +818,6 @@ ahd_reg_print_t ahd_dchspltstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg0_print;
-#else
-#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr0_print;
-#else
-#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg1_print;
-#else
-#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr1_print;
-#else
-#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg2_print;
-#else
-#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr2_print;
-#else
-#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg3_print;
-#else
-#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr3_print;
-#else
-#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgseqbcnt_print;
-#else
-#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr0_print;
-#else
-#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr1_print;
-#else
-#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr2_print;
-#else
-#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sgspltstat0_print;
#else
#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
@@ -1385,13 +832,6 @@ ahd_reg_print_t ahd_sgspltstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sfunct_print;
-#else
-#define ahd_sfunct_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_df0pcistat_print;
#else
#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
@@ -1406,41 +846,6 @@ ahd_reg_print_t ahd_reg0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df1pcistat_print;
-#else
-#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgpcistat_print;
-#else
-#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg1_print;
-#else
-#define ahd_reg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcpcistat_print;
-#else
-#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlypcistat_print;
-#else
-#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_reg_isr_print;
#else
#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
@@ -1455,13 +860,6 @@ ahd_reg_print_t ahd_sg_state_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_msipcistat_print;
-#else
-#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_targpcistat_print;
#else
#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
@@ -1469,13 +867,6 @@ ahd_reg_print_t ahd_targpcistat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_data_count_odd_print;
-#else
-#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbptr_print;
#else
#define ahd_scbptr_print(regvalue, cur_col, wrap) \
@@ -1483,13 +874,6 @@ ahd_reg_print_t ahd_scbptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbacnt_print;
-#else
-#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbautoptr_print;
#else
#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -1504,13 +888,6 @@ ahd_reg_print_t ahd_ccsgaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbadr_bk_print;
-#else
-#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbaddr_print;
#else
#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -1518,13 +895,6 @@ ahd_reg_print_t ahd_ccscbaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmc_rambist_print;
-#else
-#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbctl_print;
#else
#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
@@ -1546,13 +916,6 @@ ahd_reg_print_t ahd_ccsgram_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexadr_print;
-#else
-#define ahd_flexadr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbram_print;
#else
#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -1560,27 +923,6 @@ ahd_reg_print_t ahd_ccscbram_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexcnt_print;
-#else
-#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdmastat_print;
-#else
-#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdata_print;
-#else
-#define ahd_flexdata_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_brddat_print;
#else
#define ahd_brddat_print(regvalue, cur_col, wrap) \
@@ -1623,27 +965,6 @@ ahd_reg_print_t ahd_seestat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbcnt_print;
-#else
-#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfwaddr_print;
-#else
-#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspfltrctl_print;
-#else
-#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dspdatactl_print;
#else
#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
@@ -1651,27 +972,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfraddr_print;
-#else
-#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspreqctl_print;
-#else
-#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspackctl_print;
-#else
-#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dfdat_print;
#else
#define ahd_dfdat_print(regvalue, cur_col, wrap) \
@@ -1693,76 +993,6 @@ ahd_reg_print_t ahd_wrtbiasctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiosctl_print;
-#else
-#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_wrtbiascalc_print;
-#else
-#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiascalc_print;
-#else
-#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfptrs_print;
-#else
-#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_skewcalc_print;
-#else
-#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbkptr_print;
-#else
-#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfdbctl_print;
-#else
-#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfscnt_print;
-#else
-#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbcnt_print;
-#else
-#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyaddr_print;
-#else
-#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seqctl0_print;
#else
#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
@@ -1770,13 +1000,6 @@ ahd_reg_print_t ahd_seqctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqctl1_print;
-#else
-#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_flags_print;
#else
#define ahd_flags_print(regvalue, cur_col, wrap) \
@@ -1826,20 +1049,6 @@ ahd_reg_print_t ahd_dindex_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr0_print;
-#else
-#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr1_print;
-#else
-#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_allones_print;
#else
#define ahd_allones_print(regvalue, cur_col, wrap) \
@@ -1875,13 +1084,6 @@ ahd_reg_print_t ahd_dindir_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_function1_print;
-#else
-#define ahd_function1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_stack_print;
#else
#define ahd_stack_print(regvalue, cur_col, wrap) \
@@ -1903,13 +1105,6 @@ ahd_reg_print_t ahd_curaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastaddr_print;
-#else
-#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_intvec2_addr_print;
#else
#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
@@ -1931,24 +1126,17 @@ ahd_reg_print_t ahd_accum_save_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
-#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ahd_pci_config_base_print;
+ahd_reg_print_t ahd_sram_base_print;
#else
-#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sram_base_print;
+ahd_reg_print_t ahd_waiting_scb_tails_print;
#else
-#define ahd_sram_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -2218,17 +1406,17 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_base_print;
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
#else
-#define ahd_scb_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
+ahd_reg_print_t ahd_scb_base_print;
#else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -2246,27 +1434,6 @@ ahd_reg_print_t ahd_scb_scsi_status_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_phases_print;
-#else
-#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_data_dir_print;
-#else
-#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_itag_print;
-#else
-#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_sense_busaddr_print;
#else
#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
@@ -2365,13 +1532,6 @@ ahd_reg_print_t ahd_scb_next2_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_spare_print;
-#else
-#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_disconnected_lists_print;
#else
#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
@@ -2557,10 +1717,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SG_CACHE_PRE 0x1b
-#define LQIN 0x20
-
#define TYPEPTR 0x20
+#define LQIN 0x20
+
#define TAGPTR 0x21
#define LUNPTR 0x22
@@ -2620,14 +1780,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SINGLECMD 0x02
#define ABORTPENDING 0x01
-#define SCSBIST0 0x39
-#define GSBISTERR 0x40
-#define GSBISTDONE 0x20
-#define GSBISTRUN 0x10
-#define OSBISTERR 0x04
-#define OSBISTDONE 0x02
-#define OSBISTRUN 0x01
-
#define LQCTL2 0x39
#define LQIRETRY 0x80
#define LQICONTINUE 0x40
@@ -2638,10 +1790,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQOTOIDLE 0x02
#define LQOPAUSE 0x01
-#define SCSBIST1 0x3a
-#define NTBISTERR 0x04
-#define NTBISTDONE 0x02
-#define NTBISTRUN 0x01
+#define SCSBIST0 0x39
+#define GSBISTERR 0x40
+#define GSBISTDONE 0x20
+#define GSBISTRUN 0x10
+#define OSBISTERR 0x04
+#define OSBISTDONE 0x02
+#define OSBISTRUN 0x01
#define SCSISEQ0 0x3a
#define TEMODEO 0x80
@@ -2650,8 +1805,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define FORCEBUSFREE 0x10
#define SCSIRSTO 0x01
+#define SCSBIST1 0x3a
+#define NTBISTERR 0x04
+#define NTBISTDONE 0x02
+#define NTBISTRUN 0x01
+
#define SCSISEQ1 0x3b
+#define BUSINITID 0x3c
+
#define SXFRCTL0 0x3c
#define DFON 0x80
#define DFPEXP 0x40
@@ -2660,8 +1822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DLCOUNT 0x3c
-#define BUSINITID 0x3c
-
#define SXFRCTL1 0x3d
#define BITBUCKET 0x80
#define ENSACHK 0x40
@@ -2686,6 +1846,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CURRFIFO_1 0x01
#define CURRFIFO_0 0x00
+#define MULTARGID 0x40
+
#define SCSISIGO 0x40
#define CDO 0x80
#define IOO 0x40
@@ -2696,8 +1858,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define REQO 0x02
#define ACKO 0x01
-#define MULTARGID 0x40
-
#define SCSISIGI 0x41
#define ATNI 0x10
#define SELI 0x08
@@ -2744,15 +1904,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENAB20 0x04
#define SELWIDE 0x02
-#define CLRSINT0 0x4b
-#define CLRSELDO 0x40
-#define CLRSELDI 0x20
-#define CLRSELINGO 0x10
-#define CLRIOERR 0x08
-#define CLROVERRUN 0x04
-#define CLRSPIORDY 0x02
-#define CLRARBDO 0x01
-
#define SSTAT0 0x4b
#define TARGET 0x80
#define SELDO 0x40
@@ -2772,14 +1923,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENSPIORDY 0x02
#define ENARBDO 0x01
-#define CLRSINT1 0x4c
-#define CLRSELTIMEO 0x80
-#define CLRATNO 0x40
-#define CLRSCSIRSTI 0x20
-#define CLRBUSFREE 0x08
-#define CLRSCSIPERR 0x04
-#define CLRSTRB2FAST 0x02
-#define CLRREQINIT 0x01
+#define CLRSINT0 0x4b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRIOERR 0x08
+#define CLROVERRUN 0x04
+#define CLRSPIORDY 0x02
+#define CLRARBDO 0x01
#define SSTAT1 0x4c
#define SELTO 0x80
@@ -2791,6 +1942,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define STRB2FAST 0x02
#define REQINIT 0x01
+#define CLRSINT1 0x4c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRSTRB2FAST 0x02
+#define CLRREQINIT 0x01
+
#define SSTAT2 0x4d
#define BUSFREETIME 0xc0
#define NONPACKREQ 0x20
@@ -2838,14 +1998,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQIATNLQ 0x02
#define LQIATNCMD 0x01
-#define CLRLQIINT0 0x50
-#define CLRLQIATNQAS 0x20
-#define CLRLQICRCT1 0x10
-#define CLRLQICRCT2 0x08
-#define CLRLQIBADLQT 0x04
-#define CLRLQIATNLQ 0x02
-#define CLRLQIATNCMD 0x01
-
#define LQIMODE0 0x50
#define ENLQIATNQASK 0x20
#define ENLQICRCT1 0x10
@@ -2854,6 +2006,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENLQIATNLQ 0x02
#define ENLQIATNCMD 0x01
+#define CLRLQIINT0 0x50
+#define CLRLQIATNQAS 0x20
+#define CLRLQICRCT1 0x10
+#define CLRLQICRCT2 0x08
+#define CLRLQIBADLQT 0x04
+#define CLRLQIATNLQ 0x02
+#define CLRLQIATNCMD 0x01
+
#define LQIMODE1 0x51
#define ENLQIPHASE_LQ 0x80
#define ENLQIPHASE_NLQ 0x40
@@ -2976,6 +2136,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQOSCSCTL 0x5a
#define LQOH2A_VERSION 0x80
+#define LQOBUSETDLY 0x40
+#define LQONOHOLDLACK 0x02
#define LQONOCHKOVER 0x01
#define NEXTSCB 0x5a
@@ -2998,8 +2160,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CFG4ICMD 0x02
#define CFG4TCMD 0x01
-#define CURRSCB 0x5c
-
#define SEQIMODE 0x5c
#define ENCTXTDONE 0x40
#define ENSAVEPTRS 0x20
@@ -3009,6 +2169,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENCFG4ICMD 0x02
#define ENCFG4TCMD 0x01
+#define CURRSCB 0x5c
+
#define MDFFSTAT 0x5d
#define SHCNTNEGATIVE 0x40
#define SHCNTMINUS1 0x20
@@ -3023,29 +2185,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DFFTAG 0x5e
-#define LASTSCB 0x5e
-
#define SCSITEST 0x5e
#define CNTRTEST 0x08
#define SEL_TXPLL_DEBUG 0x04
+#define LASTSCB 0x5e
+
#define IOPDNCTL 0x5f
#define DISABLE_OE 0x80
#define PDN_IDIST 0x04
#define PDN_DIFFSENSE 0x01
+#define DGRPCRCI 0x60
+
#define SHADDR 0x60
#define NEGOADDR 0x60
-#define DGRPCRCI 0x60
-
#define NEGPERIOD 0x61
-#define PACKCRCI 0x62
-
#define NEGOFFSET 0x62
+#define PACKCRCI 0x62
+
#define NEGPPROPTS 0x63
#define PPROPT_PACE 0x08
#define PPROPT_QAS 0x04
@@ -3066,6 +2228,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ANNEXDAT 0x66
#define SCSCHKN 0x66
+#define BIDICHKDIS 0x80
#define STSELSKIDDIS 0x40
#define CURRFIFODEF 0x20
#define WIDERESEN 0x10
@@ -3090,6 +2253,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SELOID 0x6b
+#define FAIRNESS 0x6c
+
#define PLL400CTL0 0x6c
#define PLL_VCOSEL 0x80
#define PLL_PWDN 0x40
@@ -3099,8 +2264,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PLL_DLPF 0x02
#define PLL_ENFBM 0x01
-#define FAIRNESS 0x6c
-
#define PLL400CTL1 0x6d
#define PLL_CNTEN 0x80
#define PLL_CNTCLR 0x40
@@ -3112,25 +2275,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define HADDR 0x70
+#define HODMAADR 0x70
+
#define PLLDELAY 0x70
#define SPLIT_DROP_REQ 0x80
-#define HODMAADR 0x70
+#define HCNT 0x78
#define HODMACNT 0x78
-#define HCNT 0x78
-
#define HODMAEN 0x7a
-#define SCBHADDR 0x7c
-
#define SGHADDR 0x7c
-#define SCBHCNT 0x84
+#define SCBHADDR 0x7c
#define SGHCNT 0x84
+#define SCBHCNT 0x84
+
#define DFF_THRSH 0x88
#define WR_DFTHRSH 0x70
#define RD_DFTHRSH 0x07
@@ -3163,6 +2326,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CMCRXMSG0 0x90
+#define OVLYRXMSG0 0x90
+
+#define DCHRXMSG0 0x90
+
#define ROENABLE 0x90
#define MSIROEN 0x20
#define OVLYROEN 0x10
@@ -3171,11 +2338,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1ROEN 0x02
#define DCH0ROEN 0x01
-#define OVLYRXMSG0 0x90
+#define OVLYRXMSG1 0x91
-#define DCHRXMSG0 0x90
+#define CMCRXMSG1 0x91
-#define OVLYRXMSG1 0x91
+#define DCHRXMSG1 0x91
#define NSENABLE 0x91
#define MSINSEN 0x20
@@ -3185,10 +2352,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1NSEN 0x02
#define DCH0NSEN 0x01
-#define CMCRXMSG1 0x91
-
-#define DCHRXMSG1 0x91
-
#define DCHRXMSG2 0x92
#define CMCRXMSG2 0x92
@@ -3212,24 +2375,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TSCSERREN 0x02
#define CMPABCDIS 0x01
+#define CMCSEQBCNT 0x94
+
#define OVLYSEQBCNT 0x94
#define DCHSEQBCNT 0x94
-#define CMCSEQBCNT 0x94
-
-#define CMCSPLTSTAT0 0x96
-
#define DCHSPLTSTAT0 0x96
#define OVLYSPLTSTAT0 0x96
-#define CMCSPLTSTAT1 0x97
+#define CMCSPLTSTAT0 0x96
#define OVLYSPLTSTAT1 0x97
#define DCHSPLTSTAT1 0x97
+#define CMCSPLTSTAT1 0x97
+
#define SGRXMSG0 0x98
#define CDNUM 0xf8
#define CFNUM 0x07
@@ -3257,18 +2420,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TAG_NUM 0x1f
#define RLXORD 0x10
-#define SGSEQBCNT 0x9c
-
#define SLVSPLTOUTATTR0 0x9c
#define LOWER_BCNT 0xff
+#define SGSEQBCNT 0x9c
+
#define SLVSPLTOUTATTR1 0x9d
#define CMPLT_DNUM 0xf8
#define CMPLT_FNUM 0x07
-#define SLVSPLTOUTATTR2 0x9e
-#define CMPLT_BNUM 0xff
-
#define SGSPLTSTAT0 0x9e
#define STAETERM 0x80
#define SCBCERR 0x40
@@ -3279,6 +2439,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define RXSCEMSG 0x02
#define RXSPLTRSP 0x01
+#define SLVSPLTOUTATTR2 0x9e
+#define CMPLT_BNUM 0xff
+
#define SGSPLTSTAT1 0x9f
#define RXDATABUCKET 0x01
@@ -3334,10 +2497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CCSGADDR 0xac
-#define CCSCBADR_BK 0xac
-
#define CCSCBADDR 0xac
+#define CCSCBADR_BK 0xac
+
#define CMC_RAMBIST 0xad
#define SG_ELEMENT_SIZE 0x80
#define SCBRAMBIST_FAIL 0x40
@@ -3391,9 +2554,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SEEDAT 0xbc
#define SEECTL 0xbe
+#define SEEOP_EWDS 0x40
#define SEEOP_WALL 0x40
#define SEEOP_EWEN 0x40
-#define SEEOP_EWDS 0x40
#define SEEOPCODE 0x70
#define SEERST 0x02
#define SEESTART 0x01
@@ -3410,25 +2573,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCBCNT 0xbf
-#define DFWADDR 0xc0
-
#define DSPFLTRCTL 0xc0
#define FLTRDISABLE 0x20
#define EDGESENSE 0x10
#define DSPFCNTSEL 0x0f
+#define DFWADDR 0xc0
+
#define DSPDATACTL 0xc1
#define BYPASSENAB 0x80
#define DESQDIS 0x10
#define RCVROFFSTDIS 0x04
#define XMITOFFSTDIS 0x02
-#define DFRADDR 0xc2
-
#define DSPREQCTL 0xc2
#define MANREQCTL 0xc0
#define MANREQDLY 0x3f
+#define DFRADDR 0xc2
+
#define DSPACKCTL 0xc3
#define MANACKCTL 0xc0
#define MANACKDLY 0x3f
@@ -3449,14 +2612,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define WRTBIASCALC 0xc7
-#define RCVRBIASCALC 0xc8
-
#define DFPTRS 0xc8
-#define SKEWCALC 0xc9
+#define RCVRBIASCALC 0xc8
#define DFBKPTR 0xc9
+#define SKEWCALC 0xc9
+
#define DFDBCTL 0xcb
#define DFF_CIO_WR_RDY 0x20
#define DFF_CIO_RD_RDY 0x10
@@ -3541,12 +2704,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ACCUM_SAVE 0xfa
-#define WAITING_SCB_TAILS 0x100
-
#define AHD_PCI_CONFIG_BASE 0x100
#define SRAM_BASE 0x100
+#define WAITING_SCB_TAILS 0x100
+
#define WAITING_TID_HEAD 0x120
#define WAITING_TID_TAIL 0x122
@@ -3575,8 +2738,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PRELOADEN 0x80
#define WIDEODD 0x40
#define SCSIEN 0x20
-#define SDMAEN 0x10
#define SDMAENACK 0x10
+#define SDMAEN 0x10
#define HDMAEN 0x08
#define HDMAENACK 0x08
#define DIRECTION 0x04
@@ -3674,12 +2837,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define MK_MESSAGE_SCSIID 0x162
-#define SCB_BASE 0x180
-
#define SCB_RESIDUAL_DATACNT 0x180
#define SCB_CDB_STORE 0x180
#define SCB_HOST_CDB_PTR 0x180
+#define SCB_BASE 0x180
+
#define SCB_RESIDUAL_SGPTR 0x184
#define SG_ADDR_MASK 0xf8
#define SG_OVERRUN_RESID 0x02
@@ -3747,6 +2910,17 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCB_DISCONNECTED_LISTS 0x1b8
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define STIMESEL_MIN 0x18
+#define STIMESEL_SHIFT 0x03
+#define INVALID_ADDR 0x80
+#define AHD_PRECOMP_MASK 0x07
+#define TARGET_DATA_IN 0x01
+#define CCSCBADDR_MAX 0x80
+#define NUMDSPS 0x14
+#define SEEOP_EWEN_ADDR 0xc0
+#define AHD_ANNEXCOL_PER_DEV0 0x04
+#define DST_MODE_SHIFT 0x04
#define AHD_TIMER_MAX_US 0x18ffe7
#define AHD_TIMER_MAX_TICKS 0xffff
#define AHD_SENSE_BUFSIZE 0x100
@@ -3781,43 +2955,32 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
#define NVRAM_SCB_OFFSET 0x2c
#define STATUS_PKT_SENSE 0xff
-#define CMD_GROUP_CODE_SHIFT 0x05
#define MAX_OFFSET_PACED_BUG 0x7f
#define STIMESEL_BUG_ADJ 0x08
-#define STIMESEL_MIN 0x18
-#define STIMESEL_SHIFT 0x03
#define CCSGRAM_MAXSEGS 0x10
-#define INVALID_ADDR 0x80
#define SEEOP_ERAL_ADDR 0x80
#define AHD_SLEWRATE_DEF_REVB 0x08
#define AHD_PRECOMP_CUTBACK_17 0x04
-#define AHD_PRECOMP_MASK 0x07
#define SRC_MODE_SHIFT 0x00
#define PKT_OVERRUN_BUFSIZE 0x200
#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
-#define TARGET_DATA_IN 0x01
#define HOST_MSG 0xff
#define MAX_OFFSET 0xfe
#define BUS_16_BIT 0x01
-#define CCSCBADDR_MAX 0x80
-#define NUMDSPS 0x14
-#define SEEOP_EWEN_ADDR 0xc0
-#define AHD_ANNEXCOL_PER_DEV0 0x04
-#define DST_MODE_SHIFT 0x04
/* Downloaded Constant Definitions */
+#define SG_SIZEOF 0x04
+#define SG_PREFETCH_ALIGN_MASK 0x02
+#define SG_PREFETCH_CNT_LIMIT 0x01
#define CACHELINE_MASK 0x07
#define SCB_TRANSFER_SIZE 0x06
#define PKT_OVERRUN_BUFOFFSET 0x05
-#define SG_SIZEOF 0x04
#define SG_PREFETCH_ADDR_MASK 0x03
-#define SG_PREFETCH_ALIGN_MASK 0x02
-#define SG_PREFETCH_CNT_LIMIT 0x01
#define SG_PREFETCH_CNT 0x00
#define DOWNLOAD_CONST_COUNT 0x08
/* Exported Labels */
-#define LABEL_seq_isr 0x28f
#define LABEL_timer_isr 0x28b
+#define LABEL_seq_isr 0x28f
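
The hunks above reshuffle hundreds of instances of a single pattern: each register in aic79xx_reg.h_shipped gets either a real ahd_reg_print_t declaration (when AIC_DEBUG_REGISTERS is set, with the function body generated into aic79xx_reg_print.c_shipped) or a fallback macro that degenerates to a bare ahd_print_register() call with no bit-field parse table. A minimal, self-contained C sketch of that pattern follows; it is illustrative only, not code from this patch. The struct layout and the helper body are assumptions inferred from the diff — only the SSTAT0 name, its address 0x4b, and the ARBDO/SELDO bits are taken from the hunks, and the real table has more entries.

/*
 * Standalone sketch (not part of the patch) of the AIC_DEBUG_REGISTERS
 * pattern these hunks reorder.  In the shipped driver the declaration
 * lives in aic79xx_reg.h_shipped and the function body in
 * aic79xx_reg_print.c_shipped; both are collapsed into one file here.
 */
#include <stdio.h>

#define AIC_DEBUG_REGISTERS 1	/* set to 0 to exercise the macro fallback */

typedef struct {
	const char	*name;
	unsigned int	value;	/* field value under 'mask' */
	unsigned int	mask;
} ahd_reg_parse_entry_t;

typedef int (ahd_reg_print_t)(unsigned int, unsigned int *, unsigned int);

/*
 * Stand-in for the driver's ahd_print_register(); the real one wraps
 * output using cur_col/wrap, which this sketch ignores.
 */
static int
ahd_print_register(const ahd_reg_parse_entry_t *table, unsigned int num_entries,
		   const char *name, unsigned int address, unsigned int regvalue,
		   unsigned int *cur_col, unsigned int wrap)
{
	unsigned int i;

	(void)cur_col; (void)wrap;
	printf("%s[0x%02x] = 0x%02x", name, address, regvalue);
	for (i = 0; i < num_entries; i++)	/* decode any named bit fields */
		if ((regvalue & table[i].mask) == table[i].value)
			printf(" %s", table[i].name);
	printf("\n");
	return (0);
}

#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sstat0_print;	/* header-style declaration */

/* Two SSTAT0 bits taken from the hunks above; the real table is longer. */
static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
	{ "ARBDO", 0x01, 0x01 },
	{ "SELDO", 0x40, 0x40 }
};

int
ahd_sstat0_print(unsigned int regvalue, unsigned int *cur_col, unsigned int wrap)
{
	return (ahd_print_register(SSTAT0_parse_table, 2, "SSTAT0",
	    0x4b, regvalue, cur_col, wrap));
}
#else
/* Non-debug build: no parse table, just a raw register dump. */
#define ahd_sstat0_print(regvalue, cur_col, wrap) \
	ahd_print_register(NULL, 0, "SSTAT0", 0x4b, regvalue, cur_col, wrap)
#endif

int
main(void)
{
	unsigned int col = 0;

	/* prints "SSTAT0[0x4b] = 0x41 ARBDO SELDO" */
	return (ahd_sstat0_print(0x41, &col, 80));
}

Because the non-debug variant is a macro rather than a function, a dead reference to it costs nothing in production builds; that is why the generated header can afford one #if/#else/#endif stanza per register, and why this commit can reorder or drop stanzas wholesale without touching callers.
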
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index db38a61a8cb4..c4c8a96bf5a3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,7 +8,7 @@
#include "aic79xx_osm.h"
-static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
+static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
{ "SRC_MODE", 0x07, 0x07 },
{ "DST_MODE", 0x70, 0x70 }
};
@@ -20,7 +20,7 @@ ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x00, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
{ "SPLTINT", 0x01, 0x01 },
{ "CMDCMPLT", 0x02, 0x02 },
{ "SEQINT", 0x04, 0x04 },
@@ -39,7 +39,7 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
{ "NO_SEQINT", 0x00, 0xff },
{ "BAD_PHASE", 0x01, 0xff },
{ "SEND_REJECT", 0x02, 0xff },
@@ -76,7 +76,7 @@ ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x02, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
{ "CLRSPLTINT", 0x01, 0x01 },
{ "CLRCMDINT", 0x02, 0x02 },
{ "CLRSEQINT", 0x04, 0x04 },
@@ -94,7 +94,7 @@ ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
{ "DSCTMOUT", 0x02, 0x02 },
{ "ILLOPCODE", 0x04, 0x04 },
{ "SQPARERR", 0x08, 0x08 },
@@ -111,24 +111,7 @@ ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x04, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRERR_parse_table[] = {
- { "CLRDSCTMOUT", 0x02, 0x02 },
- { "CLRILLOPCODE", 0x04, 0x04 },
- { "CLRSQPARERR", 0x08, 0x08 },
- { "CLRDPARERR", 0x10, 0x10 },
- { "CLRMPARERR", 0x20, 0x20 },
- { "CLRCIOACCESFAIL", 0x40, 0x40 },
- { "CLRCIOPARERR", 0x80, 0x80 }
-};
-
-int
-ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
- 0x04, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
{ "CHIPRST", 0x01, 0x01 },
{ "CHIPRSTACK", 0x01, 0x01 },
{ "INTEN", 0x02, 0x02 },
@@ -160,7 +143,7 @@ ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x08, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
{ "ENINT_COALESCE", 0x40, 0x40 },
{ "HOST_TQINPOS", 0x80, 0x80 }
};
@@ -172,7 +155,7 @@ ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
{ "SEQ_SPLTINT", 0x01, 0x01 },
{ "SEQ_PCIINT", 0x02, 0x02 },
{ "SEQ_SCSIINT", 0x04, 0x04 },
@@ -187,7 +170,7 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
{ "CLRSEQ_SPLTINT", 0x01, 0x01 },
{ "CLRSEQ_PCIINT", 0x02, 0x02 },
{ "CLRSEQ_SCSIINT", 0x04, 0x04 },
@@ -230,7 +213,7 @@ ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
{ "SCB_QSIZE_4", 0x00, 0x0f },
{ "SCB_QSIZE_8", 0x01, 0x0f },
{ "SCB_QSIZE_16", 0x02, 0x0f },
@@ -258,7 +241,7 @@ ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x16, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t INTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
{ "SPLTINTEN", 0x01, 0x01 },
{ "SEQINTEN", 0x02, 0x02 },
{ "SCSIINTEN", 0x04, 0x04 },
@@ -276,7 +259,7 @@ ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x18, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
{ "DIRECTIONEN", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "FIFOFLUSHACK", 0x02, 0x02 },
@@ -297,7 +280,7 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
{ "CIOPARCKEN", 0x01, 0x01 },
{ "DISABLE_TWATE", 0x02, 0x02 },
{ "EXTREQLCK", 0x10, 0x10 },
@@ -313,7 +296,7 @@ ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
{ "FIFOEMP", 0x01, 0x01 },
{ "FIFOFULL", 0x02, 0x02 },
{ "DFTHRESH", 0x04, 0x04 },
@@ -330,7 +313,7 @@ ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "ODD_SEG", 0x04, 0x04 },
@@ -344,20 +327,7 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ARBCTL_parse_table[] = {
- { "USE_TIME", 0x07, 0x07 },
- { "RETRY_SWEN", 0x08, 0x08 },
- { "RESET_HARB", 0x80, 0x80 }
-};
-
-int
-ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
- 0x1b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
{ "LAST_SEG", 0x02, 0x02 },
{ "ODD_SEG", 0x04, 0x04 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -378,20 +348,6 @@ ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TYPEPTR",
- 0x20, regvalue, cur_col, wrap));
-}
-
-int
-ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TAGPTR",
- 0x21, regvalue, cur_col, wrap));
-}
-
-int
ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "LUNPTR",
@@ -399,20 +355,6 @@ ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DATALENPTR",
- 0x23, regvalue, cur_col, wrap));
-}
-
-int
-ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "STATLENPTR",
- 0x24, regvalue, cur_col, wrap));
-}
-
-int
ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CMDLENPTR",
@@ -448,13 +390,6 @@ ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "IDPTR",
- 0x2a, regvalue, cur_col, wrap));
-}
-
-int
ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
@@ -468,28 +403,7 @@ ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x2c, regvalue, cur_col, wrap));
}
-int
-ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
- 0x2d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
- 0x2e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SHORTTHRESH",
- 0x2f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
+static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
{ "ILUNLEN", 0x0f, 0x0f },
{ "TLUNLEN", 0xf0, 0xf0 }
};
@@ -522,49 +436,7 @@ ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x33, regvalue, cur_col, wrap));
}
-int
-ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD01",
- 0x34, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD16",
- 0x35, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD17",
- 0x36, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDRSVD0",
- 0x37, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
- { "LQ0INITGCLT", 0x03, 0x03 },
- { "LQ0TARGCLT", 0x0c, 0x0c },
- { "LQIINITGCLT", 0x30, 0x30 },
- { "LQITARGCLT", 0xc0, 0xc0 }
-};
-
-int
-ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
- 0x38, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
{ "ABORTPENDING", 0x01, 0x01 },
{ "SINGLECMD", 0x02, 0x02 },
{ "PCI2PCI", 0x04, 0x04 }
@@ -577,23 +449,7 @@ ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x38, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = {
- { "OSBISTRUN", 0x01, 0x01 },
- { "OSBISTDONE", 0x02, 0x02 },
- { "OSBISTERR", 0x04, 0x04 },
- { "GSBISTRUN", 0x10, 0x10 },
- { "GSBISTDONE", 0x20, 0x20 },
- { "GSBISTERR", 0x40, 0x40 }
-};
-
-int
-ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
- 0x39, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
{ "LQOPAUSE", 0x01, 0x01 },
{ "LQOTOIDLE", 0x02, 0x02 },
{ "LQOCONTINUE", 0x04, 0x04 },
@@ -611,20 +467,7 @@ ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x39, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = {
- { "NTBISTRUN", 0x01, 0x01 },
- { "NTBISTDONE", 0x02, 0x02 },
- { "NTBISTERR", 0x04, 0x04 }
-};
-
-int
-ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
- 0x3a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
{ "SCSIRSTO", 0x01, 0x01 },
{ "FORCEBUSFREE", 0x10, 0x10 },
{ "ENARBO", 0x20, 0x20 },
@@ -639,7 +482,7 @@ ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
{ "ALTSTIM", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "MANUALP", 0x0c, 0x0c },
@@ -655,7 +498,7 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
{ "SPIOEN", 0x08, 0x08 },
{ "BIOSCANCELEN", 0x10, 0x10 },
{ "DFPEXP", 0x40, 0x40 },
@@ -669,21 +512,7 @@ ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3c, regvalue, cur_col, wrap));
}
-int
-ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DLCOUNT",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSINITID",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
{ "STPWEN", 0x01, 0x01 },
{ "ACTNEGEN", 0x02, 0x02 },
{ "ENSTIMER", 0x04, 0x04 },
@@ -700,27 +529,7 @@ ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3d, regvalue, cur_col, wrap));
}
-int
-ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSTARGID",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
- { "ASU", 0x07, 0x07 },
- { "CMDDMAEN", 0x08, 0x08 },
- { "AUTORSTDIS", 0x10, 0x10 }
-};
-
-int
-ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
{ "CURRFIFO_0", 0x00, 0x03 },
{ "CURRFIFO_1", 0x01, 0x03 },
{ "CURRFIFO_NONE", 0x03, 0x03 },
@@ -736,7 +545,14 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
+int
+ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "MULTARGID",
+ 0x40, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -763,14 +579,7 @@ ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x40, regvalue, cur_col, wrap));
}
-int
-ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MULTARGID",
- 0x40, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -797,7 +606,7 @@ ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x41, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
{ "DATA_OUT_PHASE", 0x01, 0x03 },
{ "DATA_IN_PHASE", 0x02, 0x03 },
{ "DATA_PHASE_MASK", 0x03, 0x03 },
@@ -815,13 +624,6 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
- 0x43, regvalue, cur_col, wrap));
-}
-
-int
ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCSIDAT",
@@ -835,7 +637,7 @@ ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x46, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
{ "TARGID", 0x0f, 0x0f },
{ "CLKOUT", 0x80, 0x80 }
};
@@ -847,7 +649,7 @@ ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x48, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SELID_parse_table[] = {
+static const ahd_reg_parse_entry_t SELID_parse_table[] = {
{ "ONEBIT", 0x08, 0x08 },
{ "SELID_MASK", 0xf0, 0xf0 }
};
@@ -859,7 +661,7 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x49, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
{ "AUTO_MSGOUT_DE", 0x02, 0x02 },
{ "ENDGFORMCHK", 0x04, 0x04 },
{ "BUSFREEREV", 0x10, 0x10 },
@@ -876,7 +678,7 @@ ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
{ "SELWIDE", 0x02, 0x02 },
{ "ENAB20", 0x04, 0x04 },
{ "ENAB40", 0x08, 0x08 },
@@ -891,24 +693,7 @@ ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
- { "CLRARBDO", 0x01, 0x01 },
- { "CLRSPIORDY", 0x02, 0x02 },
- { "CLROVERRUN", 0x04, 0x04 },
- { "CLRIOERR", 0x08, 0x08 },
- { "CLRSELINGO", 0x10, 0x10 },
- { "CLRSELDI", 0x20, 0x20 },
- { "CLRSELDO", 0x40, 0x40 }
-};
-
-int
-ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
- 0x4b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "ARBDO", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
{ "OVERRUN", 0x04, 0x04 },
@@ -926,7 +711,7 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
{ "ENARBDO", 0x01, 0x01 },
{ "ENSPIORDY", 0x02, 0x02 },
{ "ENOVERRUN", 0x04, 0x04 },
@@ -943,24 +728,24 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
- { "CLRREQINIT", 0x01, 0x01 },
- { "CLRSTRB2FAST", 0x02, 0x02 },
- { "CLRSCSIPERR", 0x04, 0x04 },
- { "CLRBUSFREE", 0x08, 0x08 },
- { "CLRSCSIRSTI", 0x20, 0x20 },
- { "CLRATNO", 0x40, 0x40 },
- { "CLRSELTIMEO", 0x80, 0x80 }
+static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
+ { "CLRARBDO", 0x01, 0x01 },
+ { "CLRSPIORDY", 0x02, 0x02 },
+ { "CLROVERRUN", 0x04, 0x04 },
+ { "CLRIOERR", 0x08, 0x08 },
+ { "CLRSELINGO", 0x10, 0x10 },
+ { "CLRSELDI", 0x20, 0x20 },
+ { "CLRSELDO", 0x40, 0x40 }
};
int
-ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
- 0x4c, regvalue, cur_col, wrap));
+ return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
+ 0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "STRB2FAST", 0x02, 0x02 },
{ "SCSIPERR", 0x04, 0x04 },
@@ -978,7 +763,24 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
+ { "CLRREQINIT", 0x01, 0x01 },
+ { "CLRSTRB2FAST", 0x02, 0x02 },
+ { "CLRSCSIPERR", 0x04, 0x04 },
+ { "CLRBUSFREE", 0x08, 0x08 },
+ { "CLRSCSIRSTI", 0x20, 0x20 },
+ { "CLRATNO", 0x40, 0x40 },
+ { "CLRSELTIMEO", 0x80, 0x80 }
+};
+
+int
+ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+ 0x4c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
{ "BUSFREE_LQO", 0x40, 0xc0 },
{ "BUSFREE_DFF0", 0x80, 0xc0 },
{ "BUSFREE_DFF1", 0xc0, 0xc0 },
@@ -998,20 +800,7 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE2_parse_table[] = {
- { "ENDMADONE", 0x01, 0x01 },
- { "ENSDONE", 0x02, 0x02 },
- { "ENWIDE_RES", 0x04, 0x04 }
-};
-
-int
-ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
- 0x4d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
{ "CLRDMADONE", 0x01, 0x01 },
{ "CLRSDONE", 0x02, 0x02 },
{ "CLRWIDE_RES", 0x04, 0x04 },
@@ -1025,7 +814,7 @@ ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
{ "DTERR", 0x01, 0x01 },
{ "DGFORMERR", 0x02, 0x02 },
{ "CRCERR", 0x04, 0x04 },
@@ -1064,7 +853,7 @@ ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
{ "LQIATNCMD", 0x01, 0x01 },
{ "LQIATNLQ", 0x02, 0x02 },
{ "LQIBADLQT", 0x04, 0x04 },
@@ -1080,23 +869,7 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
- { "CLRLQIATNCMD", 0x01, 0x01 },
- { "CLRLQIATNLQ", 0x02, 0x02 },
- { "CLRLQIBADLQT", 0x04, 0x04 },
- { "CLRLQICRCT2", 0x08, 0x08 },
- { "CLRLQICRCT1", 0x10, 0x10 },
- { "CLRLQIATNQAS", 0x20, 0x20 }
-};
-
-int
-ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
{ "ENLQIATNCMD", 0x01, 0x01 },
{ "ENLQIATNLQ", 0x02, 0x02 },
{ "ENLQIBADLQT", 0x04, 0x04 },
@@ -1112,7 +885,23 @@ ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
+ { "CLRLQIATNCMD", 0x01, 0x01 },
+ { "CLRLQIATNLQ", 0x02, 0x02 },
+ { "CLRLQIBADLQT", 0x04, 0x04 },
+ { "CLRLQICRCT2", 0x08, 0x08 },
+ { "CLRLQICRCT1", 0x10, 0x10 },
+ { "CLRLQIATNQAS", 0x20, 0x20 }
+};
+
+int
+ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
+ 0x50, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
{ "ENLQIOVERI_NLQ", 0x01, 0x01 },
{ "ENLQIOVERI_LQ", 0x02, 0x02 },
{ "ENLQIBADLQI", 0x04, 0x04 },
@@ -1130,7 +919,7 @@ ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
{ "LQIOVERI_NLQ", 0x01, 0x01 },
{ "LQIOVERI_LQ", 0x02, 0x02 },
{ "LQIBADLQI", 0x04, 0x04 },
@@ -1148,7 +937,7 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
{ "CLRLQIOVERI_NLQ", 0x01, 0x01 },
{ "CLRLQIOVERI_LQ", 0x02, 0x02 },
{ "CLRLQIBADLQI", 0x04, 0x04 },
@@ -1166,7 +955,7 @@ ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
{ "LQIGSAVAIL", 0x01, 0x01 },
{ "LQISTOPCMD", 0x02, 0x02 },
{ "LQISTOPLQ", 0x04, 0x04 },
@@ -1184,7 +973,7 @@ ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x52, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
{ "OSRAMPERR", 0x01, 0x01 },
{ "NTRAMPERR", 0x02, 0x02 }
};
@@ -1196,7 +985,7 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
{ "ENOSRAMPERR", 0x01, 0x01 },
{ "ENNTRAMPERR", 0x02, 0x02 }
};
@@ -1208,7 +997,7 @@ ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
{ "CLROSRAMPERR", 0x01, 0x01 },
{ "CLRNTRAMPERR", 0x02, 0x02 }
};
@@ -1220,7 +1009,7 @@ ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
{ "LQOTCRC", 0x01, 0x01 },
{ "LQOATNPKT", 0x02, 0x02 },
{ "LQOATNLQ", 0x04, 0x04 },
@@ -1235,7 +1024,7 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
{ "CLRLQOTCRC", 0x01, 0x01 },
{ "CLRLQOATNPKT", 0x02, 0x02 },
{ "CLRLQOATNLQ", 0x04, 0x04 },
@@ -1250,7 +1039,7 @@ ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
{ "ENLQOTCRC", 0x01, 0x01 },
{ "ENLQOATNPKT", 0x02, 0x02 },
{ "ENLQOATNLQ", 0x04, 0x04 },
@@ -1265,7 +1054,7 @@ ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
{ "ENLQOPHACHGINPKT", 0x01, 0x01 },
{ "ENLQOBUSFREE", 0x02, 0x02 },
{ "ENLQOBADQAS", 0x04, 0x04 },
@@ -1280,7 +1069,7 @@ ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
{ "LQOPHACHGINPKT", 0x01, 0x01 },
{ "LQOBUSFREE", 0x02, 0x02 },
{ "LQOBADQAS", 0x04, 0x04 },
@@ -1295,7 +1084,7 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
{ "CLRLQOPHACHGINPKT", 0x01, 0x01 },
{ "CLRLQOBUSFREE", 0x02, 0x02 },
{ "CLRLQOBADQAS", 0x04, 0x04 },
@@ -1310,7 +1099,7 @@ ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
{ "LQOSTOP0", 0x01, 0x01 },
{ "LQOPHACHGOUTPKT", 0x02, 0x02 },
{ "LQOWAITFIFO", 0x10, 0x10 },
@@ -1331,7 +1120,7 @@ ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x56, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
{ "ENREQINIT", 0x01, 0x01 },
{ "ENSTRB2FAST", 0x02, 0x02 },
{ "ENSCSIPERR", 0x04, 0x04 },
@@ -1356,7 +1145,7 @@ ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x58, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
{ "RSTCHN", 0x01, 0x01 },
{ "CLRCHN", 0x02, 0x02 },
{ "CLRSHCNT", 0x04, 0x04 },
@@ -1370,15 +1159,17 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
{ "LQONOCHKOVER", 0x01, 0x01 },
+ { "LQONOHOLDLACK", 0x02, 0x02 },
+ { "LQOBUSETDLY", 0x40, 0x40 },
{ "LQOH2A_VERSION", 0x80, 0x80 }
};
int
ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL",
+ return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
0x5a, regvalue, cur_col, wrap));
}
@@ -1389,7 +1180,7 @@ ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
{ "CLRCFG4TCMD", 0x01, 0x01 },
{ "CLRCFG4ICMD", 0x02, 0x02 },
{ "CLRCFG4TSTAT", 0x04, 0x04 },
@@ -1406,7 +1197,7 @@ ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
{ "CFG4TCMD", 0x01, 0x01 },
{ "CFG4ICMD", 0x02, 0x02 },
{ "CFG4TSTAT", 0x04, 0x04 },
@@ -1423,14 +1214,7 @@ ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5b, regvalue, cur_col, wrap));
}
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CURRSCB",
- 0x5c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
{ "ENCFG4TCMD", 0x01, 0x01 },
{ "ENCFG4ICMD", 0x02, 0x02 },
{ "ENCFG4TSTAT", 0x04, 0x04 },
@@ -1447,7 +1231,14 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "CURRSCB",
+ 0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
{ "FIFOFREE", 0x01, 0x01 },
{ "DATAINFIFO", 0x02, 0x02 },
{ "DLZERO", 0x04, 0x04 },
@@ -1464,24 +1255,6 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
- { "CRCVALCHKEN", 0x40, 0x40 }
-};
-
-int
-ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
- 0x5d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFFTAG",
- 0x5e, regvalue, cur_col, wrap));
-}
-
int
ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1489,31 +1262,6 @@ ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5e, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
- { "SEL_TXPLL_DEBUG", 0x04, 0x04 },
- { "CNTRTEST", 0x08, 0x08 }
-};
-
-int
-ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
- 0x5e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
- { "PDN_DIFFSENSE", 0x01, 0x01 },
- { "PDN_IDIST", 0x04, 0x04 },
- { "DISABLE_OE", 0x80, 0x80 }
-};
-
-int
-ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
- 0x5f, regvalue, cur_col, wrap));
-}
-
int
ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1529,13 +1277,6 @@ ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DGRPCRCI",
- 0x60, regvalue, cur_col, wrap));
-}
-
-int
ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "NEGPERIOD",
@@ -1543,20 +1284,13 @@ ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PACKCRCI",
- 0x62, regvalue, cur_col, wrap));
-}
-
-int
ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "NEGOFFSET",
0x62, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
{ "PPROPT_IUT", 0x01, 0x01 },
{ "PPROPT_DT", 0x02, 0x02 },
{ "PPROPT_QAS", 0x04, 0x04 },
@@ -1570,7 +1304,7 @@ ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x63, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
{ "WIDEXFER", 0x01, 0x01 },
{ "ENAUTOATNO", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
@@ -1601,20 +1335,21 @@ ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x66, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
{ "LSTSGCLRDIS", 0x01, 0x01 },
{ "SHVALIDSTDIS", 0x02, 0x02 },
{ "DFFACTCLR", 0x04, 0x04 },
{ "SDONEMSKDIS", 0x08, 0x08 },
{ "WIDERESEN", 0x10, 0x10 },
{ "CURRFIFODEF", 0x20, 0x20 },
- { "STSELSKIDDIS", 0x40, 0x40 }
+ { "STSELSKIDDIS", 0x40, 0x40 },
+ { "BIDICHKDIS", 0x80, 0x80 }
};
int
ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+ return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
0x66, regvalue, cur_col, wrap));
}
@@ -1625,23 +1360,6 @@ ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x67, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
- 0x68, regvalue, cur_col, wrap));
-}
-
int
ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1656,33 +1374,6 @@ ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x69, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
- 0x69, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL960CNT0",
- 0x6a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "XSIG",
- 0x6a, regvalue, cur_col, wrap));
-}
-
int
ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1690,57 +1381,6 @@ ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x6b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FAIRNESS",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
- 0x6d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "UNFAIRNESS",
- 0x6e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL400CNT0",
- 0x6e, regvalue, cur_col, wrap));
-}
-
int
ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1748,31 +1388,6 @@ ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x70, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
- { "SPLIT_DROP_REQ", 0x80, 0x80 }
-};
-
-int
-ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMAADR",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMACNT",
- 0x78, regvalue, cur_col, wrap));
-}
-
int
ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1781,10 +1396,10 @@ ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "HODMAEN",
- 0x7a, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHADDR",
+ 0x7c, regvalue, cur_col, wrap));
}
int
@@ -1795,10 +1410,10 @@ ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SGHADDR",
- 0x7c, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHCNT",
+ 0x84, regvalue, cur_col, wrap));
}
int
@@ -1808,14 +1423,7 @@ ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x84, regvalue, cur_col, wrap));
}
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGHCNT",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
{ "WR_DFTHRSH_MIN", 0x00, 0x70 },
{ "RD_DFTHRSH_MIN", 0x00, 0x07 },
{ "RD_DFTHRSH_25", 0x01, 0x07 },
@@ -1843,209 +1451,7 @@ ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x88, regvalue, cur_col, wrap));
}
-int
-ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMADDR",
- 0x8a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
- { "RDY", 0x01, 0x01 },
- { "REPEAT", 0x02, 0x02 },
- { "ROMSPD", 0x18, 0x18 },
- { "ROMOP", 0xe0, 0xe0 }
-};
-
-int
-ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
- 0x8d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMDATA",
- 0x8e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
- { "DCH0ROEN", 0x01, 0x01 },
- { "DCH1ROEN", 0x02, 0x02 },
- { "SGROEN", 0x04, 0x04 },
- { "CMCROEN", 0x08, 0x08 },
- { "OVLYROEN", 0x10, 0x10 },
- { "MSIROEN", 0x20, 0x20 }
-};
-
-int
-ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
- { "DCH0NSEN", 0x01, 0x01 },
- { "DCH1NSEN", 0x02, 0x02 },
- { "SGNSEN", 0x04, 0x04 },
- { "CMCNSEN", 0x08, 0x08 },
- { "OVLYNSEN", 0x10, 0x10 },
- { "MSINSEN", 0x20, 0x20 }
-};
-
-int
-ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OST",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
{ "CMPABCDIS", 0x01, 0x01 },
{ "TSCSERREN", 0x02, 0x02 },
{ "SRSPDPEEN", 0x04, 0x04 },
@@ -2062,46 +1468,7 @@ ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x93, regvalue, cur_col, wrap));
}
-int
-ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
{ "RXSPLTRSP", 0x01, 0x01 },
{ "RXSCEMSG", 0x02, 0x02 },
{ "RXOVRUN", 0x04, 0x04 },
@@ -2119,47 +1486,7 @@ ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x96, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
{ "RXDATABUCKET", 0x01, 0x01 }
};
@@ -2170,139 +1497,7 @@ ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x97, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
- { "LOWER_ADDR", 0x7f, 0x7f }
-};
-
-int
-ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
- { "REQ_FNUM", 0x07, 0x07 },
- { "REQ_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
- { "REQ_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
- { "RLXORD", 0x10, 0x10 },
- { "TAG_NUM", 0x1f, 0x1f }
-};
-
-int
-ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGSEQBCNT",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
- { "LOWER_BCNT", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
- { "CMPLT_FNUM", 0x07, 0x07 },
- { "CMPLT_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
- 0x9d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
- { "CMPLT_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
{ "RXSPLTRSP", 0x01, 0x01 },
{ "RXSCEMSG", 0x02, 0x02 },
{ "RXOVRUN", 0x04, 0x04 },
@@ -2320,7 +1515,7 @@ ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9e, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
{ "RXDATABUCKET", 0x01, 0x01 }
};
@@ -2331,19 +1526,7 @@ ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SFUNCT_parse_table[] = {
- { "TEST_NUM", 0x0f, 0x0f },
- { "TEST_GROUP", 0xf0, 0xf0 }
-};
-
-int
-ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
- 0x9f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
{ "DPR", 0x01, 0x01 },
{ "TWATERR", 0x02, 0x02 },
{ "RDPERR", 0x04, 0x04 },
@@ -2368,83 +1551,6 @@ ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa0, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
- 0xa1, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "REG1",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
- 0xa3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
- 0xa4, regvalue, cur_col, wrap));
-}
-
int
ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -2452,7 +1558,7 @@ ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
{ "SEGS_AVAIL", 0x01, 0x01 },
{ "LOADING_NEEDED", 0x02, 0x02 },
{ "FETCH_INPROG", 0x04, 0x04 }
@@ -2465,23 +1571,7 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa6, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "CLRPENDMSI", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 }
-};
-
-int
-ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
- 0xa6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
{ "TWATERR", 0x02, 0x02 },
{ "STA", 0x08, 0x08 },
{ "SSE", 0x40, 0x40 },
@@ -2496,27 +1586,13 @@ ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
- 0xa7, regvalue, cur_col, wrap));
-}
-
-int
ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCBPTR",
0xa8, regvalue, cur_col, wrap));
}
-int
-ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBACNT",
- 0xab, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
{ "SCBPTR_OFF", 0x07, 0x07 },
{ "SCBPTR_ADDR", 0x38, 0x38 },
{ "AUSCBPTR_EN", 0x80, 0x80 }
@@ -2537,36 +1613,13 @@ ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
- 0xac, regvalue, cur_col, wrap));
-}
-
-int
ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CCSCBADDR",
0xac, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = {
- { "CMC_BUFFER_BIST_EN", 0x01, 0x01 },
- { "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
- { "SG_BIST_EN", 0x10, 0x10 },
- { "SG_BIST_FAIL", 0x20, 0x20 },
- { "SCBRAMBIST_FAIL", 0x40, 0x40 },
- { "SG_ELEMENT_SIZE", 0x80, 0x80 }
-};
-
-int
-ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
- 0xad, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
{ "CCSCBRESET", 0x01, 0x01 },
{ "CCSCBDIR", 0x04, 0x04 },
{ "CCSCBEN", 0x08, 0x08 },
@@ -2582,7 +1635,7 @@ ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xad, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
{ "CCSGRESET", 0x01, 0x01 },
{ "SG_FETCH_REQ", 0x02, 0x02 },
{ "CCSGENACK", 0x08, 0x08 },
@@ -2606,13 +1659,6 @@ ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXADR",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-int
ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CCSCBRAM",
@@ -2620,39 +1666,13 @@ ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXCNT",
- 0xb3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
- { "FLEXDMADONE", 0x01, 0x01 },
- { "FLEXDMAERR", 0x02, 0x02 }
-};
-
-int
-ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
- 0xb5, regvalue, cur_col, wrap));
-}
-
-int
-ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXDATA",
- 0xb6, regvalue, cur_col, wrap));
-}
-
-int
ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "BRDDAT",
0xb8, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
{ "BRDSTB", 0x01, 0x01 },
{ "BRDRW", 0x02, 0x02 },
{ "BRDEN", 0x04, 0x04 },
@@ -2682,7 +1702,7 @@ ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbc, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
{ "SEEOP_ERAL", 0x40, 0x70 },
{ "SEEOP_WRITE", 0x50, 0x70 },
{ "SEEOP_READ", 0x60, 0x70 },
@@ -2702,7 +1722,7 @@ ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbe, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
{ "SEESTART", 0x01, 0x01 },
{ "SEEBUSY", 0x02, 0x02 },
{ "SEEARBACK", 0x04, 0x04 },
@@ -2718,34 +1738,7 @@ ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbe, regvalue, cur_col, wrap));
}
-int
-ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCBCNT",
- 0xbf, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFWADDR",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
- { "DSPFCNTSEL", 0x0f, 0x0f },
- { "EDGESENSE", 0x10, 0x10 },
- { "FLTRDISABLE", 0x20, 0x20 }
-};
-
-int
-ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
{ "XMITOFFSTDIS", 0x02, 0x02 },
{ "RCVROFFSTDIS", 0x04, 0x04 },
{ "DESQDIS", 0x10, 0x10 },
@@ -2760,44 +1753,13 @@ ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFRADDR",
- 0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
- { "MANREQDLY", 0x3f, 0x3f },
- { "MANREQCTL", 0xc0, 0xc0 }
-};
-
-int
-ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
- 0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
- { "MANACKDLY", 0x3f, 0x3f },
- { "MANACKCTL", 0xc0, 0xc0 }
-};
-
-int
-ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
- 0xc3, regvalue, cur_col, wrap));
-}
-
-int
ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "DFDAT",
0xc4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
{ "DSPSEL", 0x1f, 0x1f },
{ "AUTOINCEN", 0x80, 0x80 }
};
@@ -2809,7 +1771,7 @@ ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
{ "XMITMANVAL", 0x3f, 0x3f },
{ "AUTOXBCDIS", 0x80, 0x80 }
};
@@ -2821,91 +1783,7 @@ ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc5, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = {
- { "RCVRMANVAL", 0x3f, 0x3f },
- { "AUTORBCDIS", 0x80, 0x80 }
-};
-
-int
-ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
- 0xc6, regvalue, cur_col, wrap));
-}
-
-int
-ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WRTBIASCALC",
- 0xc7, regvalue, cur_col, wrap));
-}
-
-int
-ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
- 0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFPTRS",
- 0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SKEWCALC",
- 0xc9, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFBKPTR",
- 0xc9, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
- { "DFF_RAMBIST_EN", 0x01, 0x01 },
- { "DFF_RAMBIST_DONE", 0x02, 0x02 },
- { "DFF_RAMBIST_FAIL", 0x04, 0x04 },
- { "DFF_DIR_ERR", 0x08, 0x08 },
- { "DFF_CIO_RD_RDY", 0x10, 0x10 },
- { "DFF_CIO_WR_RDY", 0x20, 0x20 }
-};
-
-int
-ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
- 0xcb, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFSCNT",
- 0xcc, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFBCNT",
- 0xce, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OVLYADDR",
- 0xd4, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
{ "STEP", 0x04, 0x04 },
@@ -2923,21 +1801,7 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xd6, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = {
- { "RAMBIST_EN", 0x01, 0x01 },
- { "RAMBIST_FAIL", 0x02, 0x02 },
- { "RAMBIST_DONE", 0x04, 0x04 },
- { "OVRLAY_DATA_CHK", 0x08, 0x08 }
-};
-
-int
-ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
- 0xd7, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
{ "CARRY", 0x01, 0x01 },
{ "ZERO", 0x02, 0x02 }
};
@@ -2949,7 +1813,7 @@ ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xd8, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
{ "IRET", 0x01, 0x01 },
{ "INTMASK1", 0x02, 0x02 },
{ "INTMASK2", 0x04, 0x04 },
@@ -3002,24 +1866,6 @@ ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BRKADDR0",
- 0xe6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
- { "BRKDIS", 0x80, 0x80 }
-};
-
-int
-ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
- 0xe6, regvalue, cur_col, wrap));
-}
-
-int
ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "ALLONES",
@@ -3055,13 +1901,6 @@ ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FUNCTION1",
- 0xf0, regvalue, cur_col, wrap));
-}
-
-int
ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "STACK",
@@ -3083,13 +1922,6 @@ ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LASTADDR",
- 0xf6, regvalue, cur_col, wrap));
-}
-
-int
ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
@@ -3111,23 +1943,16 @@ ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
- 0x100, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE",
+ return (ahd_print_register(NULL, 0, "SRAM_BASE",
0x100, regvalue, cur_col, wrap));
}
int
-ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SRAM_BASE",
+ return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
0x100, regvalue, cur_col, wrap));
}
@@ -3215,7 +2040,7 @@ ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x137, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -3235,7 +2060,7 @@ ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x138, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
{ "DPHASE_PENDING", 0x04, 0x04 },
@@ -3268,7 +2093,7 @@ ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x13b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -3326,7 +2151,7 @@ ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x144, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
{ "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
{ "CONT_MSG_LOOP_READ", 0x03, 0x03 },
{ "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
@@ -3358,7 +2183,7 @@ ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
{ "ALTSTIM", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "MANUALP", 0x0c, 0x0c },
@@ -3381,7 +2206,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
{ "PENDING_MK_MESSAGE", 0x01, 0x01 },
{ "TARGET_MSG_PENDING", 0x02, 0x02 },
{ "SELECTOUT_QFROZEN", 0x04, 0x04 }
@@ -3465,20 +2290,20 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SCB_BASE",
+ return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
0x180, regvalue, cur_col, wrap));
}
int
-ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
+ return (ahd_print_register(NULL, 0, "SCB_BASE",
0x180, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_OVERRUN_RESID", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -3499,27 +2324,6 @@ ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
- 0x189, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
- 0x18a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
- 0x18b, regvalue, cur_col, wrap));
-}
-
-int
ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
@@ -3533,7 +2337,7 @@ ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x190, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "SCB_TAG_TYPE", 0x03, 0x03 },
{ "DISCONNECTED", 0x04, 0x04 },
{ "STATUS_RCVD", 0x08, 0x08 },
@@ -3550,7 +2354,7 @@ ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x192, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
{ "OID", 0x0f, 0x0f },
{ "TID", 0xf0, 0xf0 }
};
@@ -3562,7 +2366,7 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x193, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
{ "LID", 0xff, 0xff }
};
@@ -3573,7 +2377,7 @@ ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x194, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
{ "SCB_XFERLEN_ODD", 0x01, 0x01 }
};
@@ -3584,7 +2388,7 @@ ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x195, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
{ "SCB_CDB_LEN_PTR", 0x80, 0x80 }
};
@@ -3609,7 +2413,7 @@ ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x198, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
{ "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
{ "SG_LAST_SEG", 0x80, 0x80 }
};
@@ -3621,7 +2425,7 @@ ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1a0, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_FULL_RESID", 0x02, 0x02 },
{ "SG_STATUS_VALID", 0x04, 0x04 }
@@ -3656,13 +2460,6 @@ ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_SPARE",
- 0x1b0, regvalue, cur_col, wrap));
-}
-
-int
ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
index 11bed07e90b7..4b51e232392f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -5,7 +5,7 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
*/
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
0xff, 0x02, 0x06, 0x78,
0x00, 0xea, 0x6e, 0x59,
0x01, 0xea, 0x04, 0x30,
@@ -1027,7 +1027,7 @@ ahd_patch0_func(struct ahd_softc *ahd)
return (0);
}
-static struct patch {
+static const struct patch {
ahd_patch_func_t *patch_func;
uint32_t begin :10,
skip_instr :10,
@@ -1166,7 +1166,7 @@ static struct patch {
{ ahd_patch23_func, 815, 11, 1 }
};
-static struct cs {
+static const struct cs {
uint16_t begin;
uint16_t end;
} critical_sections[] = {
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index c0344e617651..e4e651cca3e4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -736,7 +736,7 @@ struct ahc_syncrate {
#define ST_SXFR 0x010 /* Rate Single Transition Only */
#define DT_SXFR 0x040 /* Rate Double Transition Only */
uint8_t period; /* Period to send to SCSI target */
- char *rate;
+ const char *rate;
};
/* Safe and valid period for async negotiations. */
@@ -1114,7 +1114,7 @@ typedef int (ahc_device_setup_t)(struct ahc_softc *);
struct ahc_pci_identity {
uint64_t full_id;
uint64_t id_mask;
- char *name;
+ const char *name;
ahc_device_setup_t *setup;
};
@@ -1133,15 +1133,11 @@ extern const int ahc_num_aic7770_devs;
/*************************** Function Declarations ****************************/
/******************************************************************************/
-u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
-void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
-void ahc_busy_tcl(struct ahc_softc *ahc,
- u_int tcl, u_int busyid);
/***************************** PCI Front End *********************************/
-struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
+const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
int ahc_pci_config(struct ahc_softc *,
- struct ahc_pci_identity *);
+ const struct ahc_pci_identity *);
int ahc_pci_test_register_access(struct ahc_softc *);
#ifdef CONFIG_PM
void ahc_pci_resume(struct ahc_softc *ahc);
@@ -1155,9 +1151,6 @@ int aic7770_config(struct ahc_softc *ahc,
/************************** SCB and SCB queue management **********************/
int ahc_probe_scbs(struct ahc_softc *);
-void ahc_run_untagged_queues(struct ahc_softc *ahc);
-void ahc_run_untagged_queue(struct ahc_softc *ahc,
- struct scb_tailq *queue);
void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
struct scb *scb);
int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
@@ -1178,22 +1171,8 @@ int ahc_resume(struct ahc_softc *ahc);
#endif
void ahc_set_unit(struct ahc_softc *, int);
void ahc_set_name(struct ahc_softc *, char *);
-void ahc_alloc_scbs(struct ahc_softc *ahc);
void ahc_free(struct ahc_softc *ahc);
int ahc_reset(struct ahc_softc *ahc, int reinit);
-void ahc_shutdown(void *arg);
-
-/*************************** Interrupt Services *******************************/
-void ahc_clear_intstat(struct ahc_softc *ahc);
-void ahc_run_qoutfifo(struct ahc_softc *ahc);
-#ifdef AHC_TARGET_MODE
-void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
-#endif
-void ahc_handle_brkadrint(struct ahc_softc *ahc);
-void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
-void ahc_handle_scsiint(struct ahc_softc *ahc,
- u_int intstat);
-void ahc_clear_critical_section(struct ahc_softc *ahc);
/***************************** Error Recovery *********************************/
typedef enum {
@@ -1214,36 +1193,19 @@ int ahc_search_disc_list(struct ahc_softc *ahc, int target,
char channel, int lun, u_int tag,
int stop_on_first, int remove,
int save_state);
-void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
int ahc_reset_channel(struct ahc_softc *ahc, char channel,
int initiate_reset);
-int ahc_abort_scbs(struct ahc_softc *ahc, int target,
- char channel, int lun, u_int tag,
- role_t role, uint32_t status);
-void ahc_restart(struct ahc_softc *ahc);
-void ahc_calc_residual(struct ahc_softc *ahc,
- struct scb *scb);
+
/*************************** Utility Functions ********************************/
-struct ahc_phase_table_entry*
- ahc_lookup_phase_entry(int phase);
void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
u_int our_id, u_int target,
u_int lun, char channel,
role_t role);
/************************** Transfer Negotiation ******************************/
-struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int *ppr_options, u_int maxsync);
u_int ahc_find_period(struct ahc_softc *ahc,
u_int scsirate, u_int maxsync);
-void ahc_validate_offset(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *tinfo,
- struct ahc_syncrate *syncrate,
- u_int *offset, int wide,
- role_t role);
-void ahc_validate_width(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *tinfo,
- u_int *bus_width,
- role_t role);
/*
* Negotiation types. These are used to qualify if we should renegotiate
* even if our goal and current transport parameters are identical.
@@ -1263,7 +1225,7 @@ void ahc_set_width(struct ahc_softc *ahc,
u_int width, u_int type, int paused);
void ahc_set_syncrate(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo,
- struct ahc_syncrate *syncrate,
+ const struct ahc_syncrate *syncrate,
u_int period, u_int offset,
u_int ppr_options,
u_int type, int paused);
@@ -1305,11 +1267,10 @@ extern uint32_t ahc_debug;
#define AHC_SHOW_MASKED_ERRORS 0x1000
#define AHC_DEBUG_SEQUENCER 0x2000
#endif
-void ahc_print_scb(struct scb *scb);
void ahc_print_devinfo(struct ahc_softc *ahc,
struct ahc_devinfo *dev);
void ahc_dump_card_state(struct ahc_softc *ahc);
-int ahc_print_register(ahc_reg_parse_entry_t *table,
+int ahc_print_register(const ahc_reg_parse_entry_t *table,
u_int num_entries,
const char *name,
u_int address,
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index e196d83b93c7..0d2f763c3427 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -238,6 +238,7 @@ register SXFRCTL2 {
register OPTIONMODE {
address 0x008
access_mode RW
+ count 2
field AUTORATEEN 0x80
field AUTOACKEN 0x40
field ATNMGMNTEN 0x20
@@ -254,6 +255,7 @@ register TARGCRCCNT {
address 0x00a
size 2
access_mode RW
+ count 2
}
/*
@@ -344,6 +346,7 @@ register SSTAT2 {
register SSTAT3 {
address 0x00e
access_mode RO
+ count 2
mask SCSICNT 0xf0
mask OFFCNT 0x0f
mask U2OFFCNT 0x7f
@@ -367,6 +370,7 @@ register SCSIID_ULTRA2 {
register SIMODE0 {
address 0x010
access_mode RW
+ count 2
field ENSELDO 0x40
field ENSELDI 0x20
field ENSELINGO 0x10
@@ -429,6 +433,7 @@ register SHADDR {
register SELTIMER {
address 0x018
access_mode RW
+ count 1
field STAGE6 0x20
field STAGE5 0x10
field STAGE4 0x08
@@ -467,6 +472,7 @@ register TARGID {
address 0x01b
size 2
access_mode RW
+ count 14
}
/*
@@ -480,6 +486,7 @@ register TARGID {
register SPIOCAP {
address 0x01b
access_mode RW
+ count 10
field SOFT1 0x80
field SOFT0 0x40
field SOFTCMDEN 0x20
@@ -492,6 +499,7 @@ register SPIOCAP {
register BRDCTL {
address 0x01d
+ count 11
field BRDDAT7 0x80
field BRDDAT6 0x40
field BRDDAT5 0x20
@@ -534,6 +542,7 @@ register BRDCTL {
*/
register SEECTL {
address 0x01e
+ count 11
field EXTARBACK 0x80
field EXTARBREQ 0x40
field SEEMS 0x20
@@ -570,6 +579,7 @@ register SBLKCTL {
register SEQCTL {
address 0x060
access_mode RW
+ count 15
field PERRORDIS 0x80
field PAUSEDIS 0x40
field FAILDIS 0x20
@@ -590,6 +600,7 @@ register SEQCTL {
register SEQRAM {
address 0x061
access_mode RW
+ count 2
}
/*
@@ -604,6 +615,7 @@ register SEQADDR0 {
register SEQADDR1 {
address 0x063
access_mode RW
+ count 8
mask SEQADDR1_MASK 0x01
}
@@ -649,6 +661,7 @@ register NONE {
register FLAGS {
address 0x06b
access_mode RO
+ count 18
field ZERO 0x02
field CARRY 0x01
}
@@ -671,6 +684,7 @@ register FUNCTION1 {
register STACK {
address 0x06f
access_mode RO
+ count 5
}
const STACK_SIZE 4
@@ -692,6 +706,7 @@ register BCTL {
register DSCOMMAND0 {
address 0x084
access_mode RW
+ count 7
field CACHETHEN 0x80 /* Cache Threshold enable */
field DPARCKEN 0x40 /* Data Parity Check Enable */
field MPARCKEN 0x20 /* Memory Parity Check Enable */
@@ -717,6 +732,7 @@ register DSCOMMAND1 {
register BUSTIME {
address 0x085
access_mode RW
+ count 2
mask BOFF 0xf0
mask BON 0x0f
}
@@ -727,6 +743,7 @@ register BUSTIME {
register BUSSPD {
address 0x086
access_mode RW
+ count 2
mask DFTHRSH 0xc0
mask STBOFF 0x38
mask STBON 0x07
@@ -737,6 +754,7 @@ register BUSSPD {
/* aic7850/55/60/70/80/95 only */
register DSPCISTATUS {
address 0x086
+ count 4
mask DFTHRSH_100 0xc0
}
@@ -758,6 +776,7 @@ const SEQ_MAILBOX_SHIFT 0
register HCNTRL {
address 0x087
access_mode RW
+ count 14
field POWRDN 0x40
field SWINT 0x10
field IRQMS 0x08
@@ -869,6 +888,7 @@ register INTSTAT {
register ERROR {
address 0x092
access_mode RO
+ count 26
field CIOPARERR 0x80 /* Ultra2 only */
field PCIERRSTAT 0x40 /* PCI only */
field MPARERR 0x20 /* PCI only */
@@ -885,6 +905,7 @@ register ERROR {
register CLRINT {
address 0x092
access_mode WO
+ count 24
field CLRPARERR 0x10 /* PCI only */
field CLRBRKADRINT 0x08
field CLRSCSIINT 0x04
@@ -943,6 +964,7 @@ register DFDAT {
register SCBCNT {
address 0x09a
access_mode RW
+ count 1
field SCBAUTO 0x80
mask SCBCNT_MASK 0x1f
}
@@ -954,6 +976,7 @@ register SCBCNT {
register QINFIFO {
address 0x09b
access_mode RW
+ count 12
}
/*
@@ -972,11 +995,13 @@ register QINCNT {
register QOUTFIFO {
address 0x09d
access_mode WO
+ count 7
}
register CRCCONTROL1 {
address 0x09d
access_mode RW
+ count 3
field CRCONSEEN 0x80
field CRCVALCHKEN 0x40
field CRCENDCHKEN 0x20
@@ -1013,6 +1038,7 @@ register SCSIPHASE {
register SFUNCT {
address 0x09f
access_mode RW
+ count 4
field ALT_MODE 0x80
}
@@ -1095,6 +1121,7 @@ scb {
}
SCB_SCSIOFFSET {
size 1
+ count 1
}
SCB_NEXT {
size 1
@@ -1118,6 +1145,7 @@ const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
register SEECTL_2840 {
address 0x0c0
access_mode RW
+ count 2
field CS_2840 0x04
field CK_2840 0x02
field DO_2840 0x01
@@ -1126,6 +1154,7 @@ register SEECTL_2840 {
register STATUS_2840 {
address 0x0c1
access_mode RW
+ count 4
field EEPROM_TF 0x80
mask BIOS_SEL 0x60
mask ADSEL 0x1e
@@ -1161,6 +1190,7 @@ register CCSGCTL {
register CCSCBCNT {
address 0xEF
+ count 1
}
register CCSCBCTL {
@@ -1187,6 +1217,7 @@ register CCSCBRAM {
register SCBBADDR {
address 0x0F0
access_mode RW
+ count 3
}
register CCSCBPTR {
@@ -1195,6 +1226,7 @@ register CCSCBPTR {
register HNSCB_QOFF {
address 0x0F4
+ count 4
}
register SNSCB_QOFF {
@@ -1234,6 +1266,7 @@ register DFF_THRSH {
mask WR_DFTHRSH_85 0x50
mask WR_DFTHRSH_90 0x60
mask WR_DFTHRSH_MAX 0x70
+ count 4
}
register SG_CACHE_PRE {
@@ -1287,6 +1320,7 @@ scratch_ram {
ULTRA_ENB {
alias CMDSIZE_TABLE
size 2
+ count 2
}
/*
* Bit vector of targets that have disconnection disabled as set by
@@ -1296,6 +1330,7 @@ scratch_ram {
*/
DISC_DSB {
size 2
+ count 6
}
CMDSIZE_TABLE_TAIL {
size 4
@@ -1323,6 +1358,7 @@ scratch_ram {
/* Parameters for DMA Logic */
DMAPARAMS {
size 1
+ count 12
field PRELOADEN 0x80
field WIDEODD 0x40
field SCSIEN 0x20
@@ -1436,11 +1472,12 @@ scratch_ram {
KERNEL_TQINPOS {
size 1
}
- TQINPOS {
+ TQINPOS {
size 1
}
ARG_1 {
size 1
+ count 1
mask SEND_MSG 0x80
mask SEND_SENSE 0x40
mask SEND_REJ 0x20
@@ -1495,6 +1532,7 @@ scratch_ram {
size 1
field HA_274_EXTENDED_TRANS 0x01
alias INITIATOR_TAG
+ count 1
}
SEQ_FLAGS2 {
@@ -1518,6 +1556,7 @@ scratch_ram {
*/
SCSICONF {
size 1
+ count 12
field TERM_ENB 0x80
field RESET_SCSI 0x40
field ENSPCHK 0x20
@@ -1527,16 +1566,19 @@ scratch_ram {
INTDEF {
address 0x05c
size 1
+ count 1
field EDGE_TRIG 0x80
mask VECTOR 0x0f
}
HOSTCONF {
address 0x05d
size 1
+ count 1
}
HA_274_BIOSCTRL {
address 0x05f
size 1
+ count 1
mask BIOSMODE 0x30
mask BIOSDISABLED 0x30
field CHANNEL_B_PRIMARY 0x08
@@ -1552,6 +1594,7 @@ scratch_ram {
*/
TARG_OFFSET {
size 16
+ count 1
}
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 3cb07e114e89..dd11999b77b6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -84,16 +84,16 @@ struct seeprom_cmd {
};
/* Short opcodes for the c46 */
-static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Long opcodes for the C56/C66 */
-static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Common opcodes */
-static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
-static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
+static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
/*
* Wait for the SEERDY to go high; about 800 ns.
@@ -108,7 +108,7 @@ static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
* Send a START condition and the given command
*/
static void
-send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd)
+send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
{
uint8_t temp;
int i = 0;
@@ -227,7 +227,7 @@ int
ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
u_int start_addr, u_int count)
{
- struct seeprom_cmd *ewen, *ewds;
+ const struct seeprom_cmd *ewen, *ewds;
uint16_t v;
uint8_t temp;
int i, k;
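
The seeprom_cmd tables above spell each EEPROM opcode out one bit per array
element, and send_seeprom_cmd() clocks them out in order. A self-contained
model of that serialization (fake_cmd, emit_bit and send_cmd are illustrative
stand-ins, not driver symbols):

#include <stdio.h>
#include <stdint.h>

struct fake_cmd {
	uint8_t len;		/* number of opcode bits */
	uint8_t bits[11];	/* one bit per entry, clocked in order */
};

/* Stand-in for wiggling the part's DO/CK lines. */
static void emit_bit(int bit)
{
	putchar(bit ? '1' : '0');
}

static void send_cmd(const struct fake_cmd *cmd)
{
	int i;

	for (i = 0; i < cmd->len; i++)
		emit_bit(cmd->bits[i]);
	putchar('\n');
}

int main(void)
{
	/* Same shape as seeprom_read = {3, {1, 1, 0}} above. */
	const struct fake_cmd read_op = { 3, {1, 1, 0} };

	send_cmd(&read_op);	/* prints "110" */
	return 0;
}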
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64e62ce59c15..0ae2b4605d09 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -51,8 +51,7 @@
#endif
/***************************** Lookup Tables **********************************/
-char *ahc_chip_names[] =
-{
+static const char *const ahc_chip_names[] = {
"NONE",
"aic7770",
"aic7850",
@@ -75,10 +74,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
*/
struct ahc_hard_error_entry {
uint8_t errno;
- char *errmesg;
+ const char *errmesg;
};
-static struct ahc_hard_error_entry ahc_hard_errors[] = {
+static const struct ahc_hard_error_entry ahc_hard_errors[] = {
{ ILLHADDR, "Illegal Host Access" },
 	{ ILLSADDR, "Illegal Sequencer Address referenced" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
@@ -90,7 +89,7 @@ static struct ahc_hard_error_entry ahc_hard_errors[] = {
};
static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
-static struct ahc_phase_table_entry ahc_phase_table[] =
+static const struct ahc_phase_table_entry ahc_phase_table[] =
{
{ P_DATAOUT, MSG_NOOP, "in Data-out phase" },
{ P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -115,7 +114,7 @@ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
 * Provides a mapping of transfer periods in ns to the proper value to
* stick in the scsixfer reg.
*/
-static struct ahc_syncrate ahc_syncrates[] =
+static const struct ahc_syncrate ahc_syncrates[] =
{
/* ultra2 fast/ultra period rate */
{ 0x42, 0x000, 9, "80.0" },
@@ -148,7 +147,7 @@ static struct ahc_tmode_tstate*
static void ahc_free_tstate(struct ahc_softc *ahc,
u_int scsi_id, char channel, int force);
#endif
-static struct ahc_syncrate*
+static const struct ahc_syncrate*
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *,
u_int *period,
@@ -204,9 +203,9 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
#endif
static bus_dmamap_callback_t ahc_dmamap_cb;
-static void ahc_build_free_scb_list(struct ahc_softc *ahc);
-static int ahc_init_scbdata(struct ahc_softc *ahc);
-static void ahc_fini_scbdata(struct ahc_softc *ahc);
+static void ahc_build_free_scb_list(struct ahc_softc *ahc);
+static int ahc_init_scbdata(struct ahc_softc *ahc);
+static void ahc_fini_scbdata(struct ahc_softc *ahc);
static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
struct scb *prev_scb,
struct scb *scb);
@@ -222,7 +221,7 @@ static void ahc_dumpseq(struct ahc_softc *ahc);
#endif
static int ahc_loadseq(struct ahc_softc *ahc);
static int ahc_check_patch(struct ahc_softc *ahc,
- struct patch **start_patch,
+ const struct patch **start_patch,
u_int start_instr, u_int *skip_addr);
static void ahc_download_instr(struct ahc_softc *ahc,
u_int instrptr, uint8_t *dconsts);
@@ -237,11 +236,582 @@ static void ahc_update_scsiid(struct ahc_softc *ahc,
static int ahc_handle_target_cmd(struct ahc_softc *ahc,
struct target_cmd *cmd);
#endif
+
+static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_busy_tcl(struct ahc_softc *ahc,
+ u_int tcl, u_int busyid);
+
+/************************** SCB and SCB queue management **********************/
+static void ahc_run_untagged_queues(struct ahc_softc *ahc);
+static void ahc_run_untagged_queue(struct ahc_softc *ahc,
+ struct scb_tailq *queue);
+
+/****************************** Initialization ********************************/
+static void ahc_alloc_scbs(struct ahc_softc *ahc);
+static void ahc_shutdown(void *arg);
+
+/*************************** Interrupt Services *******************************/
+static void ahc_clear_intstat(struct ahc_softc *ahc);
+static void ahc_run_qoutfifo(struct ahc_softc *ahc);
+#ifdef AHC_TARGET_MODE
+static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
+#endif
+static void ahc_handle_brkadrint(struct ahc_softc *ahc);
+static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
+static void ahc_handle_scsiint(struct ahc_softc *ahc,
+ u_int intstat);
+static void ahc_clear_critical_section(struct ahc_softc *ahc);
+
+/***************************** Error Recovery *********************************/
+static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+static void ahc_calc_residual(struct ahc_softc *ahc,
+ struct scb *scb);
+
+/*********************** Untagged Transaction Routines ************************/
+static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
+static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
+
+/*
+ * Block our completion routine from starting the next untagged
+ * transaction for this target or target lun.
+ */
+static inline void
+ahc_freeze_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0)
+ ahc->untagged_queue_lock++;
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed. We use a counting semaphore to allow the lock
+ * to be acquired recursively. Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static inline void
+ahc_release_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0) {
+ ahc->untagged_queue_lock--;
+ if (ahc->untagged_queue_lock == 0)
+ ahc_run_untagged_queues(ahc);
+ }
+}
+
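
The freeze/release pair above is a recursive lock built on a bare counter:
freezes may nest, and the queues run only when the count returns to zero. A
minimal self-contained sketch of the same pattern, using stand-in types rather
than the driver's softc:

#include <stdio.h>

struct fake_softc {
	int untagged_queue_lock;
};

static void run_untagged_queues(struct fake_softc *sc)
{
	printf("queues run\n");
}

static void freeze(struct fake_softc *sc)
{
	sc->untagged_queue_lock++;
}

static void release(struct fake_softc *sc)
{
	if (--sc->untagged_queue_lock == 0)
		run_untagged_queues(sc);
}

int main(void)
{
	struct fake_softc sc = { 0 };

	freeze(&sc);
	freeze(&sc);	/* nested acquisition is legal */
	release(&sc);	/* count is 1: queues do not run yet */
	release(&sc);	/* count is 0: queues run exactly once */
	return 0;
}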
/************************* Sequencer Execution Control ************************/
/*
- * Restart the sequencer program from address zero
+ * Work around any chip bugs related to halting sequencer execution.
+ * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
+ * reading a register that will set this signal and deassert it.
+ * Without this workaround, if the chip is paused, by an interrupt or
+ * manual pause while accessing scb ram, accesses to certain registers
+ * will hang the system (infinite pci retries).
+ */
+static void
+ahc_pause_bug_fix(struct ahc_softc *ahc)
+{
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ (void)ahc_inb(ahc, CCSCBCTL);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahc_is_paused(struct ahc_softc *ahc)
+{
+ return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahc_pause(struct ahc_softc *ahc)
+{
+ ahc_outb(ahc, HCNTRL, ahc->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahc_is_paused(ahc) == 0)
+ ;
+
+ ahc_pause_bug_fix(ahc);
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahc_unpause(struct ahc_softc *ahc)
+{
+ if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
+ ahc_outb(ahc, HCNTRL, ahc->unpause);
+}
+
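
Together, ahc_pause() and ahc_unpause() bracket any host-side access that
needs the sequencer halted. A hedged usage sketch: peek_seqaddr() is a
hypothetical wrapper, while ahc_pause(), ahc_inb(), SEQADDR0 and
ahc_unpause() are the driver's own.

/* Illustrative only: read a sequencer-owned register safely. */
static u_int
peek_seqaddr(struct ahc_softc *ahc)
{
	u_int value;

	ahc_pause(ahc);			/* spins until PAUSE is acknowledged */
	value = ahc_inb(ahc, SEQADDR0);	/* safe: the sequencer is halted */
	ahc_unpause(ahc);		/* no-op if an interrupt is pending */
	return (value);
}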
+/************************** Memory mapping routines ***************************/
+static struct ahc_dma_seg *
+ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
+{
+ int sg_index;
+
+ sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index++;
+
+ return (&scb->sg_list[sg_index]);
+}
+
+static uint32_t
+ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
+{
+ int sg_index;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index = sg - &scb->sg_list[1];
+
+ return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
+}
+
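
The two converters above are inverses of each other, with the subtlety that
sg_list_phys corresponds to sg_list[1], not sg_list[0]. A hypothetical sanity
check makes the round-trip invariant explicit:

/* Hypothetical check, not driver code: converting an SG element's bus
 * address back to a virtual pointer must return the original element. */
static void
check_sg_roundtrip(struct scb *scb, struct ahc_dma_seg *sg)
{
	uint32_t busaddr = ahc_sg_virt_to_bus(scb, sg);

	if (ahc_sg_bus_to_virt(scb, busaddr) != sg)
		panic("SG bus/virt mapping is inconsistent");
}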
+static uint32_t
+ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
+{
+ return (ahc->scb_data->hscb_busaddr
+ + (sizeof(struct hardware_scb) * index));
+}
+
+static void
+ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
+ ahc->scb_data->hscb_dmamap,
+ /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
+ /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
+ * sizeof(struct ahc_dma_seg),
+ /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
+}
+
+#ifdef AHC_TARGET_MODE
+static uint32_t
+ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
+{
+ return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
+}
+#endif
+
+/*********************** Miscellaneous Support Functions **********************/
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static void
+ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+ uint32_t sgptr;
+
+ sgptr = ahc_le32toh(scb->hscb->sgptr);
+ if ((sgptr & SG_RESID_VALID) != 0)
+ ahc_calc_residual(ahc, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahc_initiator_tinfo *
+ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
+ u_int remote_id, struct ahc_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahc->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
+
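
The channel-B adjustment above folds the two 8-wide channels of a twin-channel
adapter into a single 16-entry tstate table. A worked, standalone illustration
of the index math (tstate_index() is a stand-in for the adjustment inline):

#include <stdio.h>

/* Channel A occupies indices 0-7, channel B indices 8-15. */
static int tstate_index(char channel, unsigned our_id)
{
	return (channel == 'B') ? our_id + 8 : our_id;
}

int main(void)
{
	printf("%d\n", tstate_index('A', 3));	/* prints 3  */
	printf("%d\n", tstate_index('B', 3));	/* prints 11 */
	return 0;
}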
+uint16_t
+ahc_inw(struct ahc_softc *ahc, u_int port)
+{
+ uint16_t r = ahc_inb(ahc, port+1) << 8;
+ return r | ahc_inb(ahc, port);
+}
+
+void
+ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahc_inl(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24));
+}
+
+void
+ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
+{
+ ahc_outb(ahc, port, (value) & 0xFF);
+ ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahc_inq(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+ | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+ | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+ | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
+}
+
+void
+ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
+ ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
+ ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
+ ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
+ ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
+}
+
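
These accessors widen the byte-wide register file into 16-, 32- and 64-bit
values, least-significant byte at the lowest port address. A standalone model
of the same composition over plain memory (the fake_* names and the regs[]
array are illustrative):

#include <stdint.h>
#include <assert.h>

static uint8_t regs[8];	/* fake register space */

static void fake_outq(unsigned port, uint64_t v)
{
	int i;

	for (i = 0; i < 8; i++)
		regs[port + i] = (v >> (8 * i)) & 0xFF;
}

static uint64_t fake_inq(unsigned port)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v |= (uint64_t)regs[port + i] << (8 * i);
	return v;
}

int main(void)
{
	fake_outq(0, 0x0123456789ABCDEFULL);
	assert(fake_inq(0) == 0x0123456789ABCDEFULL);	/* round trip */
	return 0;
}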
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahc_get_scb(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+
+ if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
+ ahc_alloc_scbs(ahc);
+ scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
+ if (scb == NULL)
+ return (NULL);
+ }
+ SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
+ return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+
+ hscb = scb->hscb;
+ /* Clean up for the next user */
+ ahc->scb_data->scbindex[hscb->tag] = NULL;
+ scb->flags = SCB_FREE;
+ hscb->control = 0;
+
+ SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
+
+ /* Notify the OSM that a resource is now available. */
+ ahc_platform_scb_free(ahc, scb);
+}
+
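
Callers treat ahc_get_scb()/ahc_free_scb() as a simple pool allocator: NULL
means the free list was empty and could not be grown. A hedged usage sketch;
submit_cmd() and its error handling are illustrative, not the driver's actual
queuecommand path.

/* Illustrative allocation pattern built on the routines above. */
static int
submit_cmd(struct ahc_softc *ahc)
{
	struct scb *scb;

	scb = ahc_get_scb(ahc);
	if (scb == NULL)
		return (-1);	/* pool exhausted and could not grow */

	/* ... fill in scb->hscb and DMA mappings here ... */

	ahc_queue_scb(ahc, scb);	/* hand it to the sequencer */
	return (0);
}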
+struct scb *
+ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
+{
+ struct scb* scb;
+
+ scb = ahc->scb_data->scbindex[tag];
+ if (scb != NULL)
+ ahc_sync_scb(ahc, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
+
+static void
+ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ u_int saved_tag;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB to download, and we
+ * can't disappoint it. To achieve this, the next
+ * SCB to download is saved off in ahc->next_queued_scb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahc->next_queued_scb->hscb;
+ saved_tag = q_hscb->tag;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ if ((scb->flags & SCB_CDB32_PTR) != 0) {
+ q_hscb->shared_data.cdb_ptr =
+ ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
+ + offsetof(struct hardware_scb, cdb32));
+ }
+ q_hscb->tag = saved_tag;
+ q_hscb->next = scb->hscb->tag;
+
+ /* Now swap HSCB pointers. */
+ ahc->next_queued_scb->hscb = scb->hscb;
+ scb->hscb = q_hscb;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahc->scb_data->scbindex[scb->hscb->tag] = scb;
+}
+
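
The swap preserves two invariants: the queued command now owns the HSCB slot
the card was told to fetch next, and scbindex[] still maps its tag back to the
right SCB. A hypothetical post-condition check restating that (old_next_tag is
the tag of ahc->next_queued_scb's HSCB saved before the swap):

/* Hypothetical checks, not driver code. */
static void
assert_swap_invariants(struct ahc_softc *ahc, struct scb *scb,
		       u_int old_next_tag)
{
	/* The queued SCB now owns the slot the card will DMA next. */
	if (scb->hscb->tag != old_next_tag)
		panic("queued SCB lost the pre-advertised slot");
	/* The tag-to-SCB index must still resolve the queued command. */
	if (ahc->scb_data->scbindex[scb->hscb->tag] != scb)
		panic("scbindex out of sync after HSCB swap");
}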
+/*
+ * Tell the sequencer about a new transaction to execute.
*/
void
+ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ ahc_swap_with_next_hscb(ahc, scb);
+
+ if (scb->hscb->tag == SCB_LIST_NULL
+ || scb->hscb->next == SCB_LIST_NULL)
+ panic("Attempt to queue invalid SCB tag %x:%x\n",
+ scb->hscb->tag, scb->hscb->next);
+
+ /*
+ * Setup data "oddness".
+ */
+ scb->hscb->lun &= LID;
+ if (ahc_get_transfer_length(scb) & 0x1)
+ scb->hscb->lun |= SCB_XFERLEN_ODD;
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ /* Tell the adapter about the newly queued SCB */
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ } else {
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_pause(ahc);
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_unpause(ahc);
+ }
+}
+
+struct scsi_sense_data *
+ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (&ahc->scb_data->sense[offset]);
+}
+
+static uint32_t
+ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (ahc->scb_data->sense_busaddr
+ + (offset * sizeof(struct scsi_sense_data)));
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/0, /*len*/256, op);
+}
+
+static void
+ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
+{
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ ahc_targetcmd_offset(ahc, 0),
+ sizeof(struct target_cmd) * AHC_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHC_RUN_QOUTFIFO 0x1
+#define AHC_RUN_TQINFIFO 0x2
+static u_int
+ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
+{
+ u_int retval;
+
+ retval = 0;
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/ahc->qoutfifonext, /*len*/1,
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
+ retval |= AHC_RUN_QOUTFIFO;
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0
+ && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
+ retval |= AHC_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
+
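
Because the return value is a bitmask, one probe can service both fifos. An
illustrative consumer (service_cmdcmplt() is hypothetical; the ahc_run_*
routines and AHC_RUN_* flags are the driver's own):

static void
service_cmdcmplt(struct ahc_softc *ahc)
{
	u_int todo = ahc_check_cmdcmpltqueues(ahc);

	if (todo & AHC_RUN_QOUTFIFO)
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
	if (todo & AHC_RUN_TQINFIFO)
		ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
}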
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahc_intr(struct ahc_softc *ahc)
+{
+ u_int intstat;
+
+ if ((ahc->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
+ && (ahc_check_cmdcmpltqueues(ahc) != 0))
+ intstat = CMDCMPLT;
+ else {
+ intstat = ahc_inb(ahc, INTSTAT);
+ }
+
+ if ((intstat & INT_PEND) == 0) {
+#if AHC_PCI_CONFIG > 0
+ if (ahc->unsolicited_ints > 500) {
+ ahc->unsolicited_ints = 0;
+ if ((ahc->chip & AHC_PCI) != 0
+ && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
+ ahc->bus_intr(ahc);
+ }
+#endif
+ ahc->unsolicited_ints++;
+ return (0);
+ }
+ ahc->unsolicited_ints = 0;
+
+ if (intstat & CMDCMPLT) {
+ ahc_outb(ahc, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ ahc_flush_device_writes(ahc);
+ ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0)
+ ahc_run_tqinfifo(ahc, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & BRKADRINT) {
+ ahc_handle_brkadrint(ahc);
+ } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
+
+ ahc_pause_bug_fix(ahc);
+
+ if ((intstat & SEQINT) != 0)
+ ahc_handle_seqint(ahc, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahc_handle_scsiint(ahc, intstat);
+ }
+ return (1);
+}
+
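
ahc_intr() follows the usual shared-interrupt convention: 0 for "not ours",
1 for "handled". A hedged sketch of how an OSM wrapper might forward its IRQ;
the driver's real Linux glue lives in aic7xxx_osm.c, and my_irq_handler() plus
the locking comments here are illustrative only.

/* Illustrative IRQ glue; assumes <linux/interrupt.h>. */
static irqreturn_t
my_irq_handler(int irq, void *dev_id)
{
	struct ahc_softc *ahc = dev_id;
	int ours;

	/* A real wrapper would take ahc's lock here... */
	ours = ahc_intr(ahc);
	/* ...and release it here. */
	return ours ? IRQ_HANDLED : IRQ_NONE;
}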
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+static void
ahc_restart(struct ahc_softc *ahc)
{
@@ -302,7 +872,7 @@ ahc_restart(struct ahc_softc *ahc)
}
/************************* Input/Output Queues ********************************/
-void
+static void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
struct scb *scb;
@@ -349,7 +919,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
}
}
-void
+static void
ahc_run_untagged_queues(struct ahc_softc *ahc)
{
int i;
@@ -358,7 +928,7 @@ ahc_run_untagged_queues(struct ahc_softc *ahc)
ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
}
-void
+static void
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
{
struct scb *scb;
@@ -374,7 +944,7 @@ ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
}
/************************* Interrupt Handling *********************************/
-void
+static void
ahc_handle_brkadrint(struct ahc_softc *ahc)
{
/*
@@ -403,7 +973,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
ahc_shutdown(ahc);
}
-void
+static void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
struct scb *scb;
@@ -954,7 +1524,7 @@ unpause:
ahc_unpause(ahc);
}
-void
+static void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
u_int scb_index;
@@ -1407,7 +1977,7 @@ ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
}
#define AHC_MAX_STEPS 2000
-void
+static void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
int stepping;
@@ -1500,7 +2070,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
/*
* Clear any pending interrupt status.
*/
-void
+static void
ahc_clear_intstat(struct ahc_softc *ahc)
{
/* Clear any interrupt conditions this may have caused */
@@ -1519,7 +2089,8 @@ ahc_clear_intstat(struct ahc_softc *ahc)
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif
-void
+#if 0 /* unused */
+static void
ahc_print_scb(struct scb *scb)
{
int i;
@@ -1551,6 +2122,7 @@ ahc_print_scb(struct scb *scb)
}
}
}
+#endif
/************************* Transfer Negotiation *******************************/
/*
@@ -1634,7 +2206,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
* by the capabilities of the bus connectivity of and sync settings for
* the target.
*/
-struct ahc_syncrate *
+const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
u_int *period, u_int *ppr_options, role_t role)
@@ -1689,11 +2261,11 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
* Return the period and offset that should be sent to the target
* if this was the beginning of an SDTR.
*/
-struct ahc_syncrate *
+const struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int *ppr_options, u_int maxsync)
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if ((ahc->features & AHC_DT) == 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
@@ -1768,7 +2340,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if ((ahc->features & AHC_ULTRA2) != 0)
scsirate &= SXFR_ULTRA2;
@@ -1806,10 +2378,10 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
* Truncate the given synchronous offset to a value the
* current adapter type and syncrate are capable of.
*/
-void
+static void
ahc_validate_offset(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
- struct ahc_syncrate *syncrate,
+ const struct ahc_syncrate *syncrate,
u_int *offset, int wide, role_t role)
{
u_int maxoffset;
@@ -1838,7 +2410,7 @@ ahc_validate_offset(struct ahc_softc *ahc,
* Truncate the given transfer width parameter to a value the
* current adapter type is capable of.
*/
-void
+static void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
u_int *bus_width, role_t role)
{
@@ -1913,7 +2485,7 @@ ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
*/
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
- struct ahc_syncrate *syncrate, u_int period,
+ const struct ahc_syncrate *syncrate, u_int period,
u_int offset, u_int ppr_options, u_int type, int paused)
{
struct ahc_initiator_tinfo *tinfo;
@@ -2220,11 +2792,11 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
role);
}
-struct ahc_phase_table_entry*
+static const struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
- struct ahc_phase_table_entry *entry;
- struct ahc_phase_table_entry *last_entry;
+ const struct ahc_phase_table_entry *entry;
+ const struct ahc_phase_table_entry *last_entry;
/*
* num_phases doesn't include the default entry which
@@ -2390,7 +2962,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
- struct ahc_syncrate *rate;
+ const struct ahc_syncrate *rate;
int dowide;
int dosync;
int doppr;
@@ -2655,7 +3227,7 @@ proto_violation_reset:
*/
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
-{
+{
struct ahc_devinfo devinfo;
u_int bus_phase;
int end_session;
@@ -3056,7 +3628,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
switch (ahc->msgin_buf[2]) {
case MSG_EXT_SDTR:
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
u_int period;
u_int ppr_options;
u_int offset;
@@ -3231,7 +3803,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
}
case MSG_EXT_PPR:
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
u_int period;
u_int offset;
u_int bus_width;
@@ -3984,7 +4556,7 @@ ahc_free(struct ahc_softc *ahc)
return;
}
-void
+static void
ahc_shutdown(void *arg)
{
struct ahc_softc *ahc;
@@ -4388,7 +4960,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
free(scb_data->scbarray, M_DEVBUF);
}
-void
+static void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
struct scb_data *scb_data;
@@ -5121,7 +5693,7 @@ ahc_resume(struct ahc_softc *ahc)
* Return the untagged transaction id for a given target/channel lun.
* Optionally, clear the entry.
*/
-u_int
+static u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
u_int scbid;
@@ -5142,7 +5714,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
return (scbid);
}
-void
+static void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
u_int target_offset;
@@ -5160,7 +5732,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
}
}
-void
+static void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
u_int target_offset;
@@ -5215,7 +5787,7 @@ ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
return match;
}
-void
+static void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
int target;
@@ -5707,7 +6279,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
*/
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
-{
+{
u_int curscb, next;
/*
@@ -5756,7 +6328,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
* been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
* is paused before it is called.
*/
-int
+static int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
int lun, u_int tag, role_t role, uint32_t status)
{
@@ -6078,7 +6650,7 @@ ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
/*
* Calculate the residual for a just completed SCB.
*/
-void
+static void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
struct hardware_scb *hscb;
@@ -6279,7 +6851,7 @@ ahc_loadseq(struct ahc_softc *ahc)
struct cs cs_table[num_critical_sections];
u_int begin_set[num_critical_sections];
u_int end_set[num_critical_sections];
- struct patch *cur_patch;
+ const struct patch *cur_patch;
u_int cs_count;
u_int cur_cs;
u_int i;
@@ -6384,11 +6956,11 @@ ahc_loadseq(struct ahc_softc *ahc)
}
static int
-ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
+ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
u_int start_instr, u_int *skip_addr)
{
- struct patch *cur_patch;
- struct patch *last_patch;
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
u_int num_patches;
num_patches = ARRAY_SIZE(patches);
@@ -6447,7 +7019,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
case AIC_OP_JE:
case AIC_OP_JZ:
{
- struct patch *cur_patch;
+ const struct patch *cur_patch;
int address_offset;
u_int address;
u_int skip_addr;
@@ -6545,7 +7117,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
}
int
-ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
+ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
const char *name, u_int address, u_int value,
u_int *cur_column, u_int wrap_point)
{
@@ -7229,7 +7801,7 @@ ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
ahc_outb(ahc, SCSIID, scsiid);
}
-void
+static void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index cba2f23bbe79..09bf2f4d78d5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -46,179 +46,13 @@
#define _AIC7XXX_INLINE_H_
/************************* Sequencer Execution Control ************************/
-static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
-static __inline int ahc_is_paused(struct ahc_softc *ahc);
-static __inline void ahc_pause(struct ahc_softc *ahc);
-static __inline void ahc_unpause(struct ahc_softc *ahc);
-
-/*
- * Work around any chip bugs related to halting sequencer execution.
- * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
- * reading a register that will set this signal and deassert it.
- * Without this workaround, if the chip is paused, by an interrupt or
- * manual pause while accessing scb ram, accesses to certain registers
- * will hang the system (infinite pci retries).
- */
-static __inline void
-ahc_pause_bug_fix(struct ahc_softc *ahc)
-{
- if ((ahc->features & AHC_ULTRA2) != 0)
- (void)ahc_inb(ahc, CCSCBCTL);
-}
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahc_is_paused(struct ahc_softc *ahc)
-{
- return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop. The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahc_pause(struct ahc_softc *ahc)
-{
- ahc_outb(ahc, HCNTRL, ahc->pause);
-
- /*
- * Since the sequencer can disable pausing in a critical section, we
- * must loop until it actually stops.
- */
- while (ahc_is_paused(ahc) == 0)
- ;
-
- ahc_pause_bug_fix(ahc);
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted. If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahc_unpause(struct ahc_softc *ahc)
-{
- if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
- ahc_outb(ahc, HCNTRL, ahc->unpause);
-}
-
-/*********************** Untagged Transaction Routines ************************/
-static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
-static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
-
-/*
- * Block our completion routine from starting the next untagged
- * transaction for this target or target lun.
- */
-static __inline void
-ahc_freeze_untagged_queues(struct ahc_softc *ahc)
-{
- if ((ahc->flags & AHC_SCB_BTT) == 0)
- ahc->untagged_queue_lock++;
-}
-
-/*
- * Allow the next untagged transaction for this target or target lun
- * to be executed. We use a counting semaphore to allow the lock
- * to be acquired recursively. Once the count drops to zero, the
- * transaction queues will be run.
- */
-static __inline void
-ahc_release_untagged_queues(struct ahc_softc *ahc)
-{
- if ((ahc->flags & AHC_SCB_BTT) == 0) {
- ahc->untagged_queue_lock--;
- if (ahc->untagged_queue_lock == 0)
- ahc_run_untagged_queues(ahc);
- }
-}
+int ahc_is_paused(struct ahc_softc *ahc);
+void ahc_pause(struct ahc_softc *ahc);
+void ahc_unpause(struct ahc_softc *ahc);
/************************** Memory mapping routines ***************************/
-static __inline struct ahc_dma_seg *
- ahc_sg_bus_to_virt(struct scb *scb,
- uint32_t sg_busaddr);
-static __inline uint32_t
- ahc_sg_virt_to_bus(struct scb *scb,
- struct ahc_dma_seg *sg);
-static __inline uint32_t
- ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
-static __inline void ahc_sync_scb(struct ahc_softc *ahc,
- struct scb *scb, int op);
-static __inline void ahc_sync_sglist(struct ahc_softc *ahc,
- struct scb *scb, int op);
-static __inline uint32_t
- ahc_targetcmd_offset(struct ahc_softc *ahc,
- u_int index);
-
-static __inline struct ahc_dma_seg *
-ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
-{
- int sg_index;
-
- sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
- /* sg_list_phys points to entry 1, not 0 */
- sg_index++;
-
- return (&scb->sg_list[sg_index]);
-}
-
-static __inline uint32_t
-ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
-{
- int sg_index;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_index = sg - &scb->sg_list[1];
-
- return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
-}
-
-static __inline uint32_t
-ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
-{
- return (ahc->scb_data->hscb_busaddr
- + (sizeof(struct hardware_scb) * index));
-}
-
-static __inline void
-ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
-{
- ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
- ahc->scb_data->hscb_dmamap,
- /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
- /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
-{
- if (scb->sg_count == 0)
- return;
-
- ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
- /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
- * sizeof(struct ahc_dma_seg),
- /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
-}
-
-static __inline uint32_t
-ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
-{
- return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
-}
+void ahc_sync_sglist(struct ahc_softc *ahc,
+ struct scb *scb, int op);
/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +65,34 @@ ahc_name(struct ahc_softc *ahc)
/*********************** Miscellaneous Support Functions ***********************/
-static __inline void ahc_update_residual(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline struct ahc_initiator_tinfo *
- ahc_fetch_transinfo(struct ahc_softc *ahc,
- char channel, u_int our_id,
- u_int remote_id,
- struct ahc_tmode_tstate **tstate);
-static __inline uint16_t
- ahc_inw(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outw(struct ahc_softc *ahc, u_int port,
- u_int value);
-static __inline uint32_t
- ahc_inl(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outl(struct ahc_softc *ahc, u_int port,
- uint32_t value);
-static __inline uint64_t
- ahc_inq(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outq(struct ahc_softc *ahc, u_int port,
- uint64_t value);
-static __inline struct scb*
- ahc_get_scb(struct ahc_softc *ahc);
-static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline struct scsi_sense_data *
- ahc_get_sense_buf(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline uint32_t
- ahc_get_sense_bufaddr(struct ahc_softc *ahc,
- struct scb *scb);
-
-/*
- * Determine whether the sequencer reported a residual
- * for this SCB/transaction.
- */
-static __inline void
-ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
-{
- uint32_t sgptr;
-
- sgptr = ahc_le32toh(scb->hscb->sgptr);
- if ((sgptr & SG_RESID_VALID) != 0)
- ahc_calc_residual(ahc, scb);
-}
-
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahc_initiator_tinfo *
-ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
- u_int remote_id, struct ahc_tmode_tstate **tstate)
-{
- /*
- * Transfer data structures are stored from the perspective
- * of the target role. Since the parameters for a connection
- * in the initiator role to a given target are the same as
- * when the roles are reversed, we pretend we are the target.
- */
- if (channel == 'B')
- our_id += 8;
- *tstate = ahc->enabled_targets[our_id];
- return (&(*tstate)->transinfo[remote_id]);
-}
-
-static __inline uint16_t
-ahc_inw(struct ahc_softc *ahc, u_int port)
-{
- uint16_t r = ahc_inb(ahc, port+1) << 8;
- return r | ahc_inb(ahc, port);
-}
-
-static __inline void
-ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
-{
- ahc_outb(ahc, port, value & 0xFF);
- ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahc_inl(struct ahc_softc *ahc, u_int port)
-{
- return ((ahc_inb(ahc, port))
- | (ahc_inb(ahc, port+1) << 8)
- | (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24));
-}
-
-static __inline void
-ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
-{
- ahc_outb(ahc, port, (value) & 0xFF);
- ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
- ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
- ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahc_inq(struct ahc_softc *ahc, u_int port)
-{
- return ((ahc_inb(ahc, port))
- | (ahc_inb(ahc, port+1) << 8)
- | (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24)
- | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
- | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
- | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
- | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
-}
-
-static __inline void
-ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
-{
- ahc_outb(ahc, port, value & 0xFF);
- ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
- ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
- ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
- ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
- ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
- ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
- ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
-}
-
-/*
- * Get a free scb. If there are none, see if we can allocate a new SCB.
- */
-static __inline struct scb *
-ahc_get_scb(struct ahc_softc *ahc)
-{
- struct scb *scb;
-
- if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
- ahc_alloc_scbs(ahc);
- scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
- if (scb == NULL)
- return (NULL);
- }
- SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
- return (scb);
-}
-
-/*
- * Return an SCB resource to the free list.
- */
-static __inline void
-ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
-{
- struct hardware_scb *hscb;
-
- hscb = scb->hscb;
- /* Clean up for the next user */
- ahc->scb_data->scbindex[hscb->tag] = NULL;
- scb->flags = SCB_FREE;
- hscb->control = 0;
-
- SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
-
- /* Notify the OSM that a resource is now available. */
- ahc_platform_scb_free(ahc, scb);
-}
-
-static __inline struct scb *
-ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
-{
- struct scb* scb;
-
- scb = ahc->scb_data->scbindex[tag];
- if (scb != NULL)
- ahc_sync_scb(ahc, scb,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- return (scb);
-}
-
-static __inline void
-ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
-{
- struct hardware_scb *q_hscb;
- u_int saved_tag;
-
- /*
- * Our queuing method is a bit tricky. The card
- * knows in advance which HSCB to download, and we
- * can't disappoint it. To achieve this, the next
- * SCB to download is saved off in ahc->next_queued_scb.
- * When we are called to queue "an arbitrary scb",
- * we copy the contents of the incoming HSCB to the one
- * the sequencer knows about, swap HSCB pointers and
- * finally assign the SCB to the tag indexed location
- * in the scb_array. This makes sure that we can still
- * locate the correct SCB by SCB_TAG.
- */
- q_hscb = ahc->next_queued_scb->hscb;
- saved_tag = q_hscb->tag;
- memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
- if ((scb->flags & SCB_CDB32_PTR) != 0) {
- q_hscb->shared_data.cdb_ptr =
- ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
- + offsetof(struct hardware_scb, cdb32));
- }
- q_hscb->tag = saved_tag;
- q_hscb->next = scb->hscb->tag;
-
- /* Now swap HSCB pointers. */
- ahc->next_queued_scb->hscb = scb->hscb;
- scb->hscb = q_hscb;
-
- /* Now define the mapping from tag to SCB in the scbindex */
- ahc->scb_data->scbindex[scb->hscb->tag] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
-{
- ahc_swap_with_next_hscb(ahc, scb);
-
- if (scb->hscb->tag == SCB_LIST_NULL
- || scb->hscb->next == SCB_LIST_NULL)
- panic("Attempt to queue invalid SCB tag %x:%x\n",
- scb->hscb->tag, scb->hscb->next);
-
- /*
- * Setup data "oddness".
- */
- scb->hscb->lun &= LID;
- if (ahc_get_transfer_length(scb) & 0x1)
- scb->hscb->lun |= SCB_XFERLEN_ODD;
-
- /*
- * Keep a history of SCBs we've downloaded in the qinfifo.
- */
- ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
-
- /*
- * Make sure our data is consistent from the
- * perspective of the adapter.
- */
- ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
- /* Tell the adapter about the newly queued SCB */
- if ((ahc->features & AHC_QUEUE_REGS) != 0) {
- ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
- } else {
- if ((ahc->features & AHC_AUTOPAUSE) == 0)
- ahc_pause(ahc);
- ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
- if ((ahc->features & AHC_AUTOPAUSE) == 0)
- ahc_unpause(ahc);
- }
-}
-
-static __inline struct scsi_sense_data *
-ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
-{
- int offset;
-
- offset = scb - ahc->scb_data->scbarray;
- return (&ahc->scb_data->sense[offset]);
-}
-
-static __inline uint32_t
-ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
-{
- int offset;
-
- offset = scb - ahc->scb_data->scbarray;
- return (ahc->scb_data->sense_busaddr
- + (offset * sizeof(struct scsi_sense_data)));
-}
+struct ahc_initiator_tinfo *
+ ahc_fetch_transinfo(struct ahc_softc *ahc,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahc_tmode_tstate **tstate);
+uint16_t
+ ahc_inw(struct ahc_softc *ahc, u_int port);
+void ahc_outw(struct ahc_softc *ahc, u_int port,
+ u_int value);
+uint32_t
+ ahc_inl(struct ahc_softc *ahc, u_int port);
+void ahc_outl(struct ahc_softc *ahc, u_int port,
+ uint32_t value);
+uint64_t
+ ahc_inq(struct ahc_softc *ahc, u_int port);
+void ahc_outq(struct ahc_softc *ahc, u_int port,
+ uint64_t value);
+struct scb*
+ ahc_get_scb(struct ahc_softc *ahc);
+void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scb *
+ ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
+void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scsi_sense_data *
+ ahc_get_sense_buf(struct ahc_softc *ahc,
+ struct scb *scb);
/************************** Interrupt Processing ******************************/
-static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
-static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
-static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
-static __inline int ahc_intr(struct ahc_softc *ahc);
-
-static __inline void
-ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
-{
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
- /*offset*/0, /*len*/256, op);
-}
-
-static __inline void
-ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
-{
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0) {
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
- ahc->shared_data_dmamap,
- ahc_targetcmd_offset(ahc, 0),
- sizeof(struct target_cmd) * AHC_TMODE_CMDS,
- op);
- }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHC_RUN_QOUTFIFO 0x1
-#define AHC_RUN_TQINFIFO 0x2
-static __inline u_int
-ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
-{
- u_int retval;
-
- retval = 0;
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
- /*offset*/ahc->qoutfifonext, /*len*/1,
- BUS_DMASYNC_POSTREAD);
- if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
- retval |= AHC_RUN_QOUTFIFO;
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0
- && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
- ahc->shared_data_dmamap,
- ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
- /*len*/sizeof(struct target_cmd),
- BUS_DMASYNC_POSTREAD);
- if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
- retval |= AHC_RUN_TQINFIFO;
- }
-#endif
- return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahc_intr(struct ahc_softc *ahc)
-{
- u_int intstat;
-
- if ((ahc->pause & INTEN) == 0) {
- /*
- * Our interrupt is not enabled on the chip
- * and may be disabled for re-entrancy reasons,
- * so just return. This is likely just a shared
- * interrupt.
- */
- return (0);
- }
- /*
- * Instead of directly reading the interrupt status register,
- * infer the cause of the interrupt by checking our in-core
- * completion queues. This avoids a costly PCI bus read in
- * most cases.
- */
- if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
- && (ahc_check_cmdcmpltqueues(ahc) != 0))
- intstat = CMDCMPLT;
- else {
- intstat = ahc_inb(ahc, INTSTAT);
- }
-
- if ((intstat & INT_PEND) == 0) {
-#if AHC_PCI_CONFIG > 0
- if (ahc->unsolicited_ints > 500) {
- ahc->unsolicited_ints = 0;
- if ((ahc->chip & AHC_PCI) != 0
- && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
- ahc->bus_intr(ahc);
- }
-#endif
- ahc->unsolicited_ints++;
- return (0);
- }
- ahc->unsolicited_ints = 0;
-
- if (intstat & CMDCMPLT) {
- ahc_outb(ahc, CLRINT, CLRCMDINT);
-
- /*
- * Ensure that the chip sees that we've cleared
- * this interrupt before we walk the output fifo.
- * Otherwise, we may, due to posted bus writes,
- * clear the interrupt after we finish the scan,
- * and after the sequencer has added new entries
- * and asserted the interrupt again.
- */
- ahc_flush_device_writes(ahc);
- ahc_run_qoutfifo(ahc);
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0)
- ahc_run_tqinfifo(ahc, /*paused*/FALSE);
-#endif
- }
-
- /*
- * Handle statuses that may invalidate our cached
- * copy of INTSTAT separately.
- */
- if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
- /* Hot eject. Do nothing */
- } else if (intstat & BRKADRINT) {
- ahc_handle_brkadrint(ahc);
- } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
-
- ahc_pause_bug_fix(ahc);
-
- if ((intstat & SEQINT) != 0)
- ahc_handle_seqint(ahc, intstat);
-
- if ((intstat & SCSIINT) != 0)
- ahc_handle_scsiint(ahc, intstat);
- }
- return (1);
-}
+int ahc_intr(struct ahc_softc *ahc);
#endif /* _AIC7XXX_INLINE_H_ */
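
The ahc_intr() body removed above (now built out of line) avoids a PCI read of INTSTAT when it can: it first syncs and polls the in-core completion FIFOs and only falls back to the register when nothing is posted there. A hedged standalone sketch of that inference, with invented names:

/*
 * Infer "command complete" from a DMA-coherent in-memory FIFO before
 * paying for a PCI register read.  All names are illustrative.
 */
#include <stdint.h>

#define LIST_NULL	0xFF
#define CMDCMPLT	0x02

struct ctrl {
	volatile uint8_t *qoutfifo;	/* completion ring shared with firmware */
	unsigned int	  qoutfifonext;
	uint8_t		(*read_intstat)(struct ctrl *);	/* slow PCI read */
};

static uint8_t
intr_cause(struct ctrl *c)
{
	/* Cheap check: did the firmware post a completion in memory? */
	if (c->qoutfifo[c->qoutfifonext] != LIST_NULL)
		return CMDCMPLT;
	/* Only now pay for the costly register read. */
	return c->read_intstat(c);
}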
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 42ad48e09f02..fd2b9785ff4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -388,14 +388,83 @@ static int aic7xxx_setup(char *s);
static int ahc_linux_unit;
+/************************** OS Utility Wrappers *******************************/
+void
+ahc_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+ udelay(usec % 1024);
+ usec -= 1024;
+ }
+}
+
+/***************************** Low Level I/O **********************************/
+uint8_t
+ahc_inb(struct ahc_softc * ahc, long port)
+{
+ uint8_t x;
+
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ x = readb(ahc->bsh.maddr + port);
+ } else {
+ x = inb(ahc->bsh.ioport + port);
+ }
+ mb();
+ return (x);
+}
+
+void
+ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
+{
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ writeb(val, ahc->bsh.maddr + port);
+ } else {
+ outb(val, ahc->bsh.ioport + port);
+ }
+ mb();
+}
+
+void
+ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahc_outb(ahc, port, *array++);
+}
+
+void
+ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahc_inb(ahc, port);
+}
+
/********************************* Inlines ************************************/
-static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
+static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
-static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
-static __inline void
+static void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
scsi_dma_unmap(cmd);
}
-static __inline int
+static int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
{
@@ -442,13 +511,11 @@ ahc_linux_info(struct Scsi_Host *host)
bp = &buffer[0];
ahc = *(struct ahc_softc **)host->hostdata;
memset(bp, 0, sizeof(buffer));
- strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
- strcat(bp, AIC7XXX_DRIVER_VERSION);
- strcat(bp, "\n");
- strcat(bp, " <");
+ strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
+ " <");
strcat(bp, ahc->description);
- strcat(bp, ">\n");
- strcat(bp, " ");
+ strcat(bp, ">\n"
+ " ");
ahc_controller_info(ahc, ahc_info);
strcat(bp, ahc_info);
strcat(bp, "\n");
@@ -964,7 +1031,7 @@ aic7xxx_setup(char *s)
char *p;
char *end;
- static struct {
+ static const struct {
const char *name;
uint32_t *flag;
} options[] = {
@@ -2317,7 +2384,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
unsigned int ppr_options = tinfo->goal.ppr_options;
unsigned long flags;
unsigned long offset = tinfo->goal.offset;
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if (offset == 0)
offset = MAX_OFFSET;
@@ -2361,7 +2428,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
unsigned int ppr_options = 0;
unsigned int period = 0;
unsigned long flags;
- struct ahc_syncrate *syncrate = NULL;
+ const struct ahc_syncrate *syncrate = NULL;
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
@@ -2391,7 +2458,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
unsigned int period = tinfo->goal.period;
unsigned int width = tinfo->goal.width;
unsigned long flags;
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if (dt && spi_max_width(starget)) {
ppr_options |= MSG_EXT_PPR_DT_REQ;
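
The ahc_inb()/ahc_outb() pair moved into this file hides whether the board was mapped through MMIO or legacy port I/O behind one accessor, ordering accesses with a barrier. A kernel-context sketch of the pattern (the softc layout here is illustrative, not the driver's):

/* Dual-path register accessor, assuming <linux/io.h> is available. */
#include <linux/io.h>

enum bus_space { SPACE_MEMIO, SPACE_PIO };

struct dev_regs {
	enum bus_space	tag;
	void __iomem	*maddr;		/* ioremap()ed base, if MMIO */
	unsigned long	ioport;		/* I/O port base, if PIO */
};

static u8 reg_read8(struct dev_regs *r, long port)
{
	u8 v = (r->tag == SPACE_MEMIO) ? readb(r->maddr + port)
				       : inb(r->ioport + port);
	mb();	/* keep register accesses ordered, as ahc_inb() does */
	return v;
}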
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index b48dab447bde..3f7238db35e5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -365,7 +365,7 @@ struct ahc_platform_data {
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- uint32_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
/************************** OS Utility Wrappers *******************************/
@@ -375,82 +375,16 @@ struct ahc_platform_data {
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)
-static __inline void ahc_delay(long);
-static __inline void
-ahc_delay(long usec)
-{
- /*
- * udelay on Linux can have problems for
- * multi-millisecond waits. Wait at most
- * 1024us per call.
- */
- while (usec > 0) {
- udelay(usec % 1024);
- usec -= 1024;
- }
-}
+void ahc_delay(long);
/***************************** Low Level I/O **********************************/
-static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
-static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
-static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
- uint8_t *, int count);
-static __inline void ahc_insb(struct ahc_softc * ahc, long port,
- uint8_t *, int count);
-
-static __inline uint8_t
-ahc_inb(struct ahc_softc * ahc, long port)
-{
- uint8_t x;
-
- if (ahc->tag == BUS_SPACE_MEMIO) {
- x = readb(ahc->bsh.maddr + port);
- } else {
- x = inb(ahc->bsh.ioport + port);
- }
- mb();
- return (x);
-}
-
-static __inline void
-ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
-{
- if (ahc->tag == BUS_SPACE_MEMIO) {
- writeb(val, ahc->bsh.maddr + port);
- } else {
- outb(val, ahc->bsh.ioport + port);
- }
- mb();
-}
-
-static __inline void
-ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- ahc_outb(ahc, port, *array++);
-}
-
-static __inline void
-ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- *array++ = ahc_inb(ahc, port);
-}
+uint8_t ahc_inb(struct ahc_softc * ahc, long port);
+void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
+void ahc_outsb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
+void ahc_insb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
/**************************** Initialization **********************************/
int ahc_linux_register_host(struct ahc_softc *,
@@ -464,9 +398,6 @@ struct info_str {
int pos;
};
-void ahc_format_transinfo(struct info_str *info,
- struct ahc_transinfo *tinfo);
-
/******************************** Locking *************************************/
/* Lock protecting internal data structures */
@@ -555,61 +486,12 @@ void ahc_linux_pci_exit(void);
int ahc_pci_map_registers(struct ahc_softc *ahc);
int ahc_pci_map_int(struct ahc_softc *ahc);
-static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
+uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
int reg, int width);
-static __inline uint32_t
-ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
-{
- switch (width) {
- case 1:
- {
- uint8_t retval;
-
- pci_read_config_byte(pci, reg, &retval);
- return (retval);
- }
- case 2:
- {
- uint16_t retval;
- pci_read_config_word(pci, reg, &retval);
- return (retval);
- }
- case 4:
- {
- uint32_t retval;
- pci_read_config_dword(pci, reg, &retval);
- return (retval);
- }
- default:
- panic("ahc_pci_read_config: Read size too big");
- /* NOTREACHED */
- return (0);
- }
-}
-
-static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
- int reg, uint32_t value,
- int width);
-
-static __inline void
-ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
-{
- switch (width) {
- case 1:
- pci_write_config_byte(pci, reg, value);
- break;
- case 2:
- pci_write_config_word(pci, reg, value);
- break;
- case 4:
- pci_write_config_dword(pci, reg, value);
- break;
- default:
- panic("ahc_pci_write_config: Write size too big");
- /* NOTREACHED */
- }
-}
+void ahc_pci_write_config(ahc_dev_softc_t pci,
+ int reg, uint32_t value,
+ int width);
static __inline int ahc_get_pci_function(ahc_dev_softc_t);
static __inline int
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3d3eaef65fb3..0d7628f1f1ef 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -46,7 +46,7 @@
*/
#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
-static struct pci_device_id ahc_linux_pci_id_table[] = {
+static const struct pci_device_id ahc_linux_pci_id_table[] = {
/* aic7850 based controllers */
ID(ID_AHA_2902_04_10_15_20C_30C),
/* aic7860 based controllers */
@@ -206,7 +206,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
struct ahc_softc *ahc;
ahc_dev_softc_t pci;
- struct ahc_pci_identity *entry;
+ const struct ahc_pci_identity *entry;
char *name;
int error;
struct device *dev = &pdev->dev;
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return (0);
}
+/******************************* PCI Routines *********************************/
+uint32_t
+ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahc_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahc_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
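
All config-space access now funnels through this width-dispatched pair; any width other than 1, 2 or 4 panics. A usage fragment, assuming an ahc_dev_softc_t handle named pci (compare ahc_find_pci_device() later in this patch):

	uint32_t command;
	uint16_t vendor;

	/* 16-bit read of the vendor/device word, 32-bit RMW of COMMAND. */
	vendor  = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
	command = ahc_pci_read_config(pci, PCIR_COMMAND,   /*bytes*/4);
	ahc_pci_write_config(pci, PCIR_COMMAND,
			     command | PCIM_CMD_MEMEN, /*bytes*/4);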
+
static struct pci_driver aic7xxx_pci_driver = {
.name = "aic7xxx",
.probe = ahc_linux_pci_dev_probe,
@@ -293,7 +344,7 @@ ahc_linux_pci_exit(void)
}
static int
-ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
+ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
{
if (aic7xxx_allow_memio == 0)
return (ENOMEM);
@@ -308,10 +359,10 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
static int
ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
- u_long *bus_addr,
+ resource_size_t *bus_addr,
uint8_t __iomem **maddr)
{
- u_long start;
+ resource_size_t start;
int error;
error = 0;
@@ -336,7 +387,7 @@ int
ahc_pci_map_registers(struct ahc_softc *ahc)
{
uint32_t command;
- u_long base;
+ resource_size_t base;
uint8_t __iomem *maddr;
int error;
@@ -374,12 +425,12 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
} else
command |= PCIM_CMD_MEMEN;
} else {
- printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx "
+ printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
"unavailable. Cannot memory map device.\n",
ahc_get_pci_bus(ahc->dev_softc),
ahc_get_pci_slot(ahc->dev_softc),
ahc_get_pci_function(ahc->dev_softc),
- base);
+ (unsigned long long)base);
}
/*
@@ -390,15 +441,15 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
error = ahc_linux_pci_reserve_io_region(ahc, &base);
if (error == 0) {
ahc->tag = BUS_SPACE_PIO;
- ahc->bsh.ioport = base;
+ ahc->bsh.ioport = (u_long)base;
command |= PCIM_CMD_PORTEN;
} else {
- printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] "
+ printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
"unavailable. Cannot map device.\n",
ahc_get_pci_bus(ahc->dev_softc),
ahc_get_pci_slot(ahc->dev_softc),
ahc_get_pci_function(ahc->dev_softc),
- base);
+ (unsigned long long)base);
}
}
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
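
The u_long to resource_size_t conversions above force a portable print pattern: resource_size_t may be 32 or 64 bits depending on the platform, so the value is widened explicitly and a single %llx format is used everywhere. In isolation (printf here is the driver's printk-backed macro):

static void
print_region(resource_size_t base)
{
	/* Widen explicitly so one format string works on all platforms. */
	printf("MEM region 0x%llx unavailable\n",
	       (unsigned long long)base);
}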
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 56848f41e4f9..c07cb6eebb02 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -168,8 +168,7 @@ static ahc_device_setup_t ahc_aha394XX_setup;
static ahc_device_setup_t ahc_aha494XX_setup;
static ahc_device_setup_t ahc_aha398XX_setup;
-static struct ahc_pci_identity ahc_pci_ident_table [] =
-{
+static const struct ahc_pci_identity ahc_pci_ident_table[] = {
/* aic7850 based controllers */
{
ID_AHA_2902_04_10_15_20C_30C,
@@ -668,7 +667,7 @@ ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
return (result);
}
-struct ahc_pci_identity *
+const struct ahc_pci_identity *
ahc_find_pci_device(ahc_dev_softc_t pci)
{
uint64_t full_id;
@@ -676,7 +675,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
- struct ahc_pci_identity *entry;
+ const struct ahc_pci_identity *entry;
u_int i;
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -710,7 +709,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
}
int
-ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry)
+ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
{
u_int command;
u_int our_id;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 99e5443e7535..e92991a7c485 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -58,7 +58,7 @@ static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
-static struct {
+static const struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
@@ -137,7 +137,7 @@ copy_info(struct info_str *info, char *fmt, ...)
return (len);
}
-void
+static void
ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
{
u_int speed;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 88bfd767c51c..309a562b009e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -8,7 +8,7 @@
#include "aic7xxx_osm.h"
-static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
{ "SCSIRSTO", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
@@ -26,7 +26,7 @@ ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x00, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
{ "CLRCHN", 0x02, 0x02 },
{ "SCAMEN", 0x04, 0x04 },
{ "SPIOEN", 0x08, 0x08 },
@@ -43,7 +43,7 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
{ "STPWEN", 0x01, 0x01 },
{ "ACTNEGEN", 0x02, 0x02 },
{ "ENSTIMER", 0x04, 0x04 },
@@ -60,7 +60,7 @@ ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x02, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
{ "ACKO", 0x01, 0x01 },
{ "REQO", 0x02, 0x02 },
{ "BSYO", 0x04, 0x04 },
@@ -85,7 +85,7 @@ ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "ACKI", 0x01, 0x01 },
{ "REQI", 0x02, 0x02 },
{ "BSYI", 0x04, 0x04 },
@@ -112,7 +112,7 @@ ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
{ "SINGLE_EDGE", 0x10, 0x10 },
{ "ENABLE_CRC", 0x40, 0x40 },
{ "WIDEXFER", 0x80, 0x80 },
@@ -128,7 +128,7 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x04, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
{ "TWIN_CHNLB", 0x80, 0x80 },
{ "OID", 0x0f, 0x0f },
{ "TWIN_TID", 0x70, 0x70 },
@@ -151,20 +151,13 @@ ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCSIDATH",
- 0x07, regvalue, cur_col, wrap));
-}
-
-int
ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "STCNT",
0x08, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
{ "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
{ "AUTO_MSGOUT_DE", 0x02, 0x02 },
{ "SCSIDATL_IMGEN", 0x04, 0x04 },
@@ -190,7 +183,7 @@ ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
{ "CLRSPIORDY", 0x02, 0x02 },
{ "CLRSWRAP", 0x08, 0x08 },
{ "CLRIOERR", 0x08, 0x08 },
@@ -206,7 +199,7 @@ ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "DMADONE", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
{ "SDONE", 0x04, 0x04 },
@@ -225,7 +218,7 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
{ "CLRREQINIT", 0x01, 0x01 },
{ "CLRPHASECHG", 0x02, 0x02 },
{ "CLRSCSIPERR", 0x04, 0x04 },
@@ -242,7 +235,7 @@ ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "PHASECHG", 0x02, 0x02 },
{ "SCSIPERR", 0x04, 0x04 },
@@ -260,7 +253,7 @@ ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
{ "DUAL_EDGE_ERR", 0x01, 0x01 },
{ "CRCREQERR", 0x02, 0x02 },
{ "CRCENDERR", 0x04, 0x04 },
@@ -278,7 +271,7 @@ ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
{ "OFFCNT", 0x0f, 0x0f },
{ "U2OFFCNT", 0x7f, 0x7f },
{ "SCSICNT", 0xf0, 0xf0 }
@@ -291,7 +284,7 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
{ "OID", 0x0f, 0x0f },
{ "TID", 0xf0, 0xf0 }
};
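
The rest of this generated file is the same mechanical change repeated: every parse table gains const, which lets the compiler place the data in .rodata where it is write-protected and shareable. The pattern on a made-up two-entry table:

typedef struct { const char *name; unsigned char mask, value; } parse_entry_t;

/* const moves the table from .data into read-only .rodata. */
static const parse_entry_t EXAMPLE_parse_table[] = {
	{ "BIT0",	0x01, 0x01 },
	{ "BIT1",	0x02, 0x02 }
};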
@@ -303,7 +296,7 @@ ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0f, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
{ "ENDMADONE", 0x01, 0x01 },
{ "ENSPIORDY", 0x02, 0x02 },
{ "ENSDONE", 0x04, 0x04 },
@@ -321,7 +314,7 @@ ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x10, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
{ "ENREQINIT", 0x01, 0x01 },
{ "ENPHASECHG", 0x02, 0x02 },
{ "ENSCSIPERR", 0x04, 0x04 },
@@ -347,33 +340,13 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCSIBUSH",
- 0x13, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
- { "CMDDMAEN", 0x08, 0x08 },
- { "AUTORSTDIS", 0x10, 0x10 },
- { "ASYNC_SETUP", 0x07, 0x07 }
-};
-
-int
-ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
- 0x13, regvalue, cur_col, wrap));
-}
-
-int
ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "SHADDR",
0x14, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
+static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
{ "STAGE1", 0x01, 0x01 },
{ "STAGE2", 0x02, 0x02 },
{ "STAGE3", 0x04, 0x04 },
@@ -389,7 +362,7 @@ ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x18, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SELID_parse_table[] = {
+static const ahc_reg_parse_entry_t SELID_parse_table[] = {
{ "ONEBIT", 0x08, 0x08 },
{ "SELID_MASK", 0xf0, 0xf0 }
};
@@ -401,21 +374,6 @@ ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
- { "DFLTTID", 0x10, 0x10 },
- { "ALTSTIM", 0x20, 0x20 },
- { "CLRSCAMSELID", 0x40, 0x40 },
- { "ENSCAMSELO", 0x80, 0x80 },
- { "SCAMLVL", 0x03, 0x03 }
-};
-
-int
-ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
- 0x1a, regvalue, cur_col, wrap));
-}
-
int
ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -423,7 +381,7 @@ ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
+static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
{ "SSPIOCPS", 0x01, 0x01 },
{ "ROM", 0x02, 0x02 },
{ "EEPROM", 0x04, 0x04 },
@@ -441,7 +399,7 @@ ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
{ "BRDCTL0", 0x01, 0x01 },
{ "BRDSTB_ULTRA2", 0x01, 0x01 },
{ "BRDCTL1", 0x02, 0x02 },
@@ -464,7 +422,7 @@ ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
{ "SEEDI", 0x01, 0x01 },
{ "SEEDO", 0x02, 0x02 },
{ "SEECK", 0x04, 0x04 },
@@ -482,7 +440,7 @@ ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
{ "XCVR", 0x01, 0x01 },
{ "SELWIDE", 0x02, 0x02 },
{ "ENAB20", 0x04, 0x04 },
@@ -522,13 +480,6 @@ ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
- 0x34, regvalue, cur_col, wrap));
-}
-
-int
ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
@@ -549,7 +500,7 @@ ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -569,7 +520,7 @@ ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
{ "DPHASE_PENDING", 0x04, 0x04 },
@@ -602,7 +553,7 @@ ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "MSGI", 0x20, 0x20 },
{ "IOI", 0x40, 0x40 },
{ "CDI", 0x80, 0x80 },
@@ -645,13 +596,6 @@ ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
- 0x43, regvalue, cur_col, wrap));
-}
-
-int
ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "HSCB_ADDR",
@@ -700,7 +644,7 @@ ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
{ "CONT_TARG_SESSION", 0x02, 0x02 },
{ "CONT_MSG_LOOP", 0x04, 0x04 },
{ "EXIT_MSG_LOOP", 0x08, 0x08 },
@@ -731,7 +675,7 @@ ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
{ "ENAUTOATNP", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
{ "ENAUTOATNO", 0x08, 0x08 },
@@ -747,7 +691,7 @@ ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
{ "HA_274_EXTENDED_TRANS",0x01, 0x01 }
};
@@ -758,7 +702,7 @@ ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x56, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
{ "SCB_DMA", 0x01, 0x01 },
{ "TARGET_MSG_PENDING", 0x02, 0x02 }
};
@@ -770,7 +714,7 @@ ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x57, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
{ "ENSPCHK", 0x20, 0x20 },
{ "RESET_SCSI", 0x40, 0x40 },
{ "TERM_ENB", 0x80, 0x80 },
@@ -785,7 +729,7 @@ ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t INTDEF_parse_table[] = {
+static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
{ "EDGE_TRIG", 0x80, 0x80 },
{ "VECTOR", 0x0f, 0x0f }
};
@@ -804,7 +748,7 @@ ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
{ "CHANNEL_B_PRIMARY", 0x08, 0x08 },
{ "BIOSMODE", 0x30, 0x30 },
{ "BIOSDISABLED", 0x30, 0x30 }
@@ -817,7 +761,7 @@ ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5f, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
{ "STEP", 0x04, 0x04 },
@@ -849,7 +793,7 @@ ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x62, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
{ "SEQADDR1_MASK", 0x01, 0x01 }
};
@@ -902,7 +846,7 @@ ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x6a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
{ "CARRY", 0x01, 0x01 },
{ "ZERO", 0x02, 0x02 }
};
@@ -929,13 +873,6 @@ ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "FUNCTION1",
- 0x6e, regvalue, cur_col, wrap));
-}
-
-int
ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "STACK",
@@ -956,19 +893,7 @@ ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x70, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BCTL_parse_table[] = {
- { "ENABLE", 0x01, 0x01 },
- { "ACE", 0x08, 0x08 }
-};
-
-int
-ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
{ "CIOPARCKEN", 0x01, 0x01 },
{ "USCBSIZE32", 0x02, 0x02 },
{ "RAMPS", 0x04, 0x04 },
@@ -986,7 +911,7 @@ ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x84, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
{ "BON", 0x0f, 0x0f },
{ "BOFF", 0xf0, 0xf0 }
};
@@ -998,7 +923,7 @@ ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x85, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
{ "HADDLDSEL0", 0x01, 0x01 },
{ "HADDLDSEL1", 0x02, 0x02 },
{ "DSLATT", 0xfc, 0xfc }
@@ -1011,7 +936,7 @@ ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x85, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
{ "STBON", 0x07, 0x07 },
{ "STBOFF", 0x38, 0x38 },
{ "DFTHRSH_75", 0x80, 0x80 },
@@ -1026,7 +951,7 @@ ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
{ "SEQ_MAILBOX", 0x0f, 0x0f },
{ "HOST_TQINPOS", 0x80, 0x80 },
{ "HOST_MAILBOX", 0xf0, 0xf0 }
@@ -1039,7 +964,7 @@ ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
{ "DFTHRSH_100", 0xc0, 0xc0 }
};
@@ -1050,7 +975,7 @@ ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
{ "CHIPRST", 0x01, 0x01 },
{ "CHIPRSTACK", 0x01, 0x01 },
{ "INTEN", 0x02, 0x02 },
@@ -1088,7 +1013,7 @@ ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x90, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
{ "SEQINT", 0x01, 0x01 },
{ "CMDCMPLT", 0x02, 0x02 },
{ "SCSIINT", 0x04, 0x04 },
@@ -1119,7 +1044,7 @@ ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x91, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
{ "CLRSEQINT", 0x01, 0x01 },
{ "CLRCMDINT", 0x02, 0x02 },
{ "CLRSCSIINT", 0x04, 0x04 },
@@ -1134,7 +1059,7 @@ ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x92, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
{ "ILLHADDR", 0x01, 0x01 },
{ "ILLSADDR", 0x02, 0x02 },
{ "ILLOPCODE", 0x04, 0x04 },
@@ -1152,7 +1077,7 @@ ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x92, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -1172,7 +1097,7 @@ ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x93, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
{ "FIFOEMP", 0x01, 0x01 },
{ "FIFOFULL", 0x02, 0x02 },
{ "DFTHRESH", 0x04, 0x04 },
@@ -1198,20 +1123,13 @@ ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DFRADDR",
- 0x97, regvalue, cur_col, wrap));
-}
-
-int
ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "DFDAT",
0x99, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
{ "SCBAUTO", 0x80, 0x80 },
{ "SCBCNT_MASK", 0x1f, 0x1f }
};
@@ -1231,20 +1149,13 @@ ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QINCNT",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-int
ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "QOUTFIFO",
0x9d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
+static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
{ "TARGCRCCNTEN", 0x04, 0x04 },
{ "TARGCRCENDEN", 0x08, 0x08 },
{ "CRCREQCHKEN", 0x10, 0x10 },
@@ -1260,14 +1171,7 @@ ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9d, regvalue, cur_col, wrap));
}
-int
-ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QOUTCNT",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
{ "DATA_OUT_PHASE", 0x01, 0x01 },
{ "DATA_IN_PHASE", 0x02, 0x02 },
{ "MSG_OUT_PHASE", 0x04, 0x04 },
@@ -1284,7 +1188,7 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
+static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
{ "ALT_MODE", 0x80, 0x80 }
};
@@ -1351,7 +1255,7 @@ ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xac, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
{ "SG_LAST_SEG", 0x80, 0x80 },
{ "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
};
@@ -1363,7 +1267,7 @@ ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb0, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_FULL_RESID", 0x02, 0x02 },
{ "SG_RESID_VALID", 0x04, 0x04 }
@@ -1376,7 +1280,7 @@ ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb4, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "DISCONNECTED", 0x04, 0x04 },
{ "ULTRAENB", 0x08, 0x08 },
{ "MK_MESSAGE", 0x10, 0x10 },
@@ -1394,7 +1298,7 @@ ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb8, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
{ "TWIN_CHNLB", 0x80, 0x80 },
{ "OID", 0x0f, 0x0f },
{ "TWIN_TID", 0x70, 0x70 },
@@ -1408,7 +1312,7 @@ ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb9, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
{ "SCB_XFERLEN_ODD", 0x80, 0x80 },
{ "LID", 0x3f, 0x3f }
};
@@ -1455,14 +1359,7 @@ ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbf, regvalue, cur_col, wrap));
}
-int
-ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
{ "DO_2840", 0x01, 0x01 },
{ "CK_2840", 0x02, 0x02 },
{ "CS_2840", 0x04, 0x04 }
@@ -1475,7 +1372,7 @@ ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc0, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
{ "DI_2840", 0x01, 0x01 },
{ "EEPROM_TF", 0x80, 0x80 },
{ "ADSEL", 0x1e, 0x1e },
@@ -1524,7 +1421,7 @@ ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xea, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
{ "CCSGRESET", 0x01, 0x01 },
{ "SG_FETCH_NEEDED", 0x02, 0x02 },
{ "CCSGEN", 0x08, 0x08 },
@@ -1552,7 +1449,7 @@ ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xed, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
{ "CCSCBRESET", 0x01, 0x01 },
{ "CCSCBDIR", 0x04, 0x04 },
{ "CCSCBEN", 0x08, 0x08 },
@@ -1610,7 +1507,7 @@ ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xf8, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
{ "SDSCB_ROLLOVER", 0x10, 0x10 },
{ "SNSCB_ROLLOVER", 0x20, 0x20 },
{ "SCB_AVAIL", 0x40, 0x40 },
@@ -1625,7 +1522,7 @@ ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfa, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
{ "RD_DFTHRSH_MIN", 0x00, 0x00 },
{ "WR_DFTHRSH_MIN", 0x00, 0x00 },
{ "RD_DFTHRSH_25", 0x01, 0x01 },
@@ -1653,7 +1550,7 @@ ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfb, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -1666,7 +1563,7 @@ ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfc, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index 4cee08521e75..07e93fbae706 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -5,7 +5,7 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
*/
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
0xb2, 0x00, 0x00, 0x08,
0xf7, 0x11, 0x22, 0x08,
0x00, 0x65, 0xee, 0x59,
@@ -1081,7 +1081,7 @@ ahc_patch0_func(struct ahc_softc *ahc)
return (0);
}
-static struct patch {
+static const struct patch {
ahc_patch_func_t *patch_func;
uint32_t begin :10,
skip_instr :10,
@@ -1291,7 +1291,7 @@ static struct patch {
{ ahc_patch4_func, 865, 12, 1 }
};
-static struct cs {
+static const struct cs {
uint16_t begin;
uint16_t end;
} critical_sections[] = {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 924102720b14..e4a778720301 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -362,7 +362,7 @@ output_code()
" *\n"
"%s */\n", versions);
- fprintf(ofile, "static uint8_t seqprog[] = {\n");
+ fprintf(ofile, "static const uint8_t seqprog[] = {\n");
for (cur_instr = STAILQ_FIRST(&seq_program);
cur_instr != NULL;
cur_instr = STAILQ_NEXT(cur_instr, links)) {
@@ -415,7 +415,7 @@ output_code()
}
fprintf(ofile,
-"static struct patch {\n"
+"static const struct patch {\n"
" %spatch_func_t *patch_func;\n"
" uint32_t begin :10,\n"
" skip_instr :10,\n"
@@ -435,7 +435,7 @@ output_code()
fprintf(ofile, "\n};\n\n");
fprintf(ofile,
-"static struct cs {\n"
+"static const struct cs {\n"
" uint16_t begin;\n"
" uint16_t end;\n"
"} critical_sections[] = {\n");
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 702e2dbd11fb..81be6a261cc8 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -101,11 +101,12 @@ static void format_3_instr(int opcode, symbol_ref_t *src,
expression_t *immed, symbol_ref_t *address);
static void test_readable_symbol(symbol_t *symbol);
static void test_writable_symbol(symbol_t *symbol);
-static void type_check(symbol_t *symbol, expression_t *expression, int and_op);
+static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
static void make_expression(expression_t *immed, int value);
static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
+static int is_location_address(symbol_t *symbol);
void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
@@ -142,6 +143,8 @@ void yyerror(const char *string);
%token <value> T_ADDRESS
+%token T_COUNT
+
%token T_ACCESS_MODE
%token T_MODES
@@ -192,10 +195,10 @@ void yyerror(const char *string);
%token <value> T_OR
-/* 16 bit extensions */
-%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
-%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
-
+/* 16 bit extensions, not implemented
+ * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
+ * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
+ */
%token T_RET
%token T_NOP
@@ -214,7 +217,7 @@ void yyerror(const char *string);
%type <expression> expression immediate immediate_or_a
-%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne
+%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
%type <value> mode_value mode_list macro_arglist
@@ -313,13 +316,13 @@ reg_definition:
stop("Register multiply defined", EX_DATAERR);
/* NOTREACHED */
}
- cur_symbol = $1;
+ cur_symbol = $1;
cur_symbol->type = cur_symtype;
initialize_symbol(cur_symbol);
}
reg_attribute_list
'}'
- {
+ {
/*
* Default to allowing everything in for registers
* with no bit or mask definitions.
@@ -349,9 +352,10 @@ reg_attribute_list:
| reg_attribute_list reg_attribute
;
-reg_attribute:
+reg_attribute:
reg_address
| size
+| count
| access_mode
| modes
| field_defn
@@ -392,6 +396,13 @@ size:
}
;
+count:
+ T_COUNT T_NUMBER
+ {
+ cur_symbol->count += $2;
+ }
+;
+
access_mode:
T_ACCESS_MODE T_MODE
{
@@ -641,14 +652,14 @@ expression:
&($1.referenced_syms),
&($3.referenced_syms));
}
-| expression T_EXPR_LSHIFT expression
+| expression T_EXPR_LSHIFT expression
{
$$.value = $1.value << $3.value;
symlist_merge(&$$.referenced_syms,
&$1.referenced_syms,
&$3.referenced_syms);
}
-| expression T_EXPR_RSHIFT expression
+| expression T_EXPR_RSHIFT expression
{
$$.value = $1.value >> $3.value;
symlist_merge(&$$.referenced_syms,
@@ -714,7 +725,7 @@ expression:
;
constant:
- T_CONST T_SYMBOL expression
+ T_CONST T_SYMBOL expression
{
if ($2->type != UNINITIALIZED) {
stop("Re-definition of symbol as a constant",
@@ -800,6 +811,7 @@ scratch_ram:
cur_symtype = SRAMLOC;
cur_symbol->type = SRAMLOC;
initialize_symbol(cur_symbol);
+ cur_symbol->count += 1;
}
reg_address
{
@@ -831,6 +843,7 @@ scb:
initialize_symbol(cur_symbol);
/* 64 bytes of SCB space */
cur_symbol->info.rinfo->size = 64;
+ cur_symbol->count += 1;
}
reg_address
{
@@ -1311,14 +1324,18 @@ f2_opcode:
| T_ROR { $$ = AIC_OP_ROR; }
;
-f4_opcode:
- T_OR16 { $$ = AIC_OP_OR16; }
-| T_AND16 { $$ = AIC_OP_AND16; }
-| T_XOR16 { $$ = AIC_OP_XOR16; }
-| T_ADD16 { $$ = AIC_OP_ADD16; }
-| T_ADC16 { $$ = AIC_OP_ADC16; }
-| T_MVI16 { $$ = AIC_OP_MVI16; }
-;
+/*
+ * 16bit opcodes, not used
+ *
+ *f4_opcode:
+ * T_OR16 { $$ = AIC_OP_OR16; }
+ *| T_AND16 { $$ = AIC_OP_AND16; }
+ *| T_XOR16 { $$ = AIC_OP_XOR16; }
+ *| T_ADD16 { $$ = AIC_OP_ADD16; }
+ *| T_ADC16 { $$ = AIC_OP_ADC16; }
+ *| T_MVI16 { $$ = AIC_OP_MVI16; }
+ *;
+ */
code:
f2_opcode destination ',' expression opt_source ret ';'
@@ -1357,6 +1374,7 @@ code:
code:
T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
{
+ type_check(&$2, &$4, AIC_OP_OR);
format_3_instr($5, &$2, &$4, &$6);
}
;
@@ -1528,7 +1546,7 @@ initialize_symbol(symbol_t *symbol)
sizeof(struct cond_info));
break;
case MACRO:
- symbol->info.macroinfo =
+ symbol->info.macroinfo =
(struct macro_info *)malloc(sizeof(struct macro_info));
if (symbol->info.macroinfo == NULL) {
stop("Can't create macro info", EX_SOFTWARE);
@@ -1552,7 +1570,6 @@ add_macro_arg(const char *argtext, int argnum)
struct macro_arg *marg;
int i;
int retval;
-
if (cur_symbol == NULL || cur_symbol->type != MACRO) {
stop("Invalid current symbol for adding macro arg",
@@ -1633,8 +1650,10 @@ format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
test_writable_symbol(dest->symbol);
test_readable_symbol(src->symbol);
- /* Ensure that immediate makes sense for this destination */
- type_check(dest->symbol, immed, opcode);
+ if (!is_location_address(dest->symbol)) {
+ /* Ensure that immediate makes sense for this destination */
+ type_check(dest, immed, opcode);
+ }
/* Allocate sequencer space for the instruction and fill it out */
instr = seq_alloc();
@@ -1766,9 +1785,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
/* Test register permissions */
test_readable_symbol(src->symbol);
- /* Ensure that immediate makes sense for this source */
- type_check(src->symbol, immed, opcode);
-
/* Allocate sequencer space for the instruction and fill it out */
instr = seq_alloc();
f3_instr = &instr->format.format3;
@@ -1797,7 +1813,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
static void
test_readable_symbol(symbol_t *symbol)
{
-
if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
snprintf(errbuf, sizeof(errbuf),
"Register %s unavailable in source reg mode %d",
@@ -1815,7 +1830,6 @@ test_readable_symbol(symbol_t *symbol)
static void
test_writable_symbol(symbol_t *symbol)
{
-
if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
snprintf(errbuf, sizeof(errbuf),
"Register %s unavailable in destination reg mode %d",
@@ -1831,25 +1845,34 @@ test_writable_symbol(symbol_t *symbol)
}
static void
-type_check(symbol_t *symbol, expression_t *expression, int opcode)
+type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
{
+ symbol_t *symbol = sym->symbol;
symbol_node_t *node;
int and_op;
+ int8_t value, mask;
and_op = FALSE;
- if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
- and_op = TRUE;
-
/*
* Make sure that we aren't attempting to write something
* that hasn't been defined. If this is an and operation,
* this is a mask, so "undefined" bits are okay.
*/
- if (and_op == FALSE
- && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) {
+ if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
+ opcode == AIC_OP_JZ || opcode == AIC_OP_JNE ||
+ opcode == AIC_OP_BMOV)
+ and_op = TRUE;
+
+ /*
+ * Default to 8-bit logic: the sequencer's registers are byte-wide,
+ * so truncate both the mask and the value before the check.
+ */
+ mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
+ value = (int8_t)expression->value;
+
+ if (and_op == FALSE && (mask & value) != 0) {
snprintf(errbuf, sizeof(errbuf),
"Invalid bit(s) 0x%x in immediate written to %s",
- expression->value & ~symbol->info.rinfo->valid_bitmask,
+ (unsigned char)(mask & value),
symbol->name);
stop(errbuf, EX_DATAERR);
/* NOTREACHED */
@@ -1959,3 +1982,13 @@ is_download_const(expression_t *immed)
return (FALSE);
}
+
+static int
+is_location_address(symbol_t *sym)
+{
+ if (sym->type == SCBLOC ||
+ sym->type == SRAMLOC)
+ return (TRUE);
+ return (FALSE);
+}
+
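
The reworked type_check() truncates both the immediate and the inverted valid-bit mask to 8 bits before comparing, matching the byte-wide sequencer registers. A standalone illustration of the check (values chosen for the example):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t valid_bitmask = 0x3f;	/* say, only bits 0..5 defined */
	int	immediate = 0x180;	/* wider-than-byte expression value */

	int8_t mask  = (int8_t)~valid_bitmask;	/* bits that must stay clear */
	int8_t value = (int8_t)immediate;	/* default to 8-bit logic */

	if ((mask & value) != 0)
		printf("invalid bit(s) 0x%x in immediate\n",
		       (unsigned char)(mask & value));	/* prints 0x80 */
	return 0;
}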
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 7c3983f868a9..2c7f02daf88d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -162,6 +162,7 @@ register { return T_REGISTER; }
const { yylval.value = FALSE; return T_CONST; }
download { return T_DOWNLOAD; }
address { return T_ADDRESS; }
+count { return T_COUNT; }
access_mode { return T_ACCESS_MODE; }
modes { return T_MODES; }
RW|RO|WO {
@@ -228,15 +229,15 @@ ret { return T_RET; }
nop { return T_NOP; }
/* ARP2 16bit extensions */
-or16 { return T_OR16; }
-and16 { return T_AND16; }
-xor16 { return T_XOR16; }
-add16 { return T_ADD16; }
-adc16 { return T_ADC16; }
-mvi16 { return T_MVI16; }
-test16 { return T_TEST16; }
-cmp16 { return T_CMP16; }
-cmpxchg { return T_CMPXCHG; }
+ /* or16 { return T_OR16; } */
+ /* and16 { return T_AND16; }*/
+ /* xor16 { return T_XOR16; }*/
+ /* add16 { return T_ADD16; }*/
+ /* adc16 { return T_ADC16; }*/
+ /* mvi16 { return T_MVI16; }*/
+ /* test16 { return T_TEST16; }*/
+ /* cmp16 { return T_CMP16; }*/
+ /* cmpxchg { return T_CMPXCHG; }*/
/* Allowed Symbols */
\<\< { return T_EXPR_LSHIFT; }
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index f1f448dff569..fcd357872b43 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -77,6 +77,7 @@ symbol_create(char *name)
if (new_symbol->name == NULL)
stop("Unable to strdup symbol name", EX_SOFTWARE);
new_symbol->type = UNINITIALIZED;
+ new_symbol->count = 1;
return (new_symbol);
}
@@ -198,6 +199,12 @@ symtable_get(char *name)
}
}
memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+ stored_ptr->count++;
+ data.data = &stored_ptr;
+ if (symtable->put(symtable, &key, &data, /*flags*/0) != 0) {
+ perror("Symtable put failed");
+ exit(EX_SOFTWARE);
+ }
return (stored_ptr);
}
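
Together with the count field added in aicasm_symbol.h, this turns the symbol table into a cheap reference counter: symbol_create() starts count at 1, every symtable_get() lookup bumps it, and symtable_dump() skips registers whose count stayed at 1, so the generated _shipped files no longer carry parse tables for registers the firmware never references. A reduced sketch (hash table replaced by a linear list for brevity; error handling omitted):

#include <stdlib.h>
#include <string.h>

struct symbol { char *name; int count; struct symbol *next; };

static struct symbol *symbols;

static struct symbol *
symtable_get_sketch(const char *name)
{
	struct symbol *s;

	for (s = symbols; s != NULL; s = s->next) {
		if (strcmp(s->name, name) == 0) {
			s->count++;	/* referenced once more */
			return s;
		}
	}
	s = calloc(1, sizeof(*s));
	s->name = strdup(name);
	s->count = 1;			/* defined, not yet referenced */
	s->next = symbols;
	symbols = s;
	return s;
}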
@@ -256,7 +263,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
&& (curnode->symbol->info.finfo->value >
newnode->symbol->info.finfo->value))))
|| (!field && (curnode->symbol->info.rinfo->address >
- newnode->symbol->info.rinfo->address))) {
+ newnode->symbol->info.rinfo->address))) {
SLIST_INSERT_HEAD(symlist, newnode, links);
return;
}
@@ -271,7 +278,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
cursymbol = SLIST_NEXT(curnode, links)->symbol;
if ((field
- && (cursymbol->type > symbol->type
+ && (cursymbol->type > symbol->type
|| (cursymbol->type == symbol->type
&& (cursymbol->info.finfo->value >
symbol->info.finfo->value))))
@@ -351,7 +358,7 @@ aic_print_reg_dump_types(FILE *ofile)
{
if (ofile == NULL)
return;
-
+
fprintf(ofile,
"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
"typedef struct %sreg_parse_entry {\n"
@@ -370,7 +377,7 @@ aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
return;
fprintf(dfile,
-"static %sreg_parse_entry_t %s_parse_table[] = {\n",
+"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
prefix,
regnode->symbol->name);
}
@@ -385,7 +392,7 @@ aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
lower_name = strdup(regnode->symbol->name);
if (lower_name == NULL)
stop("Unable to strdup symbol name", EX_SOFTWARE);
-
+
for (letter = lower_name; *letter != '\0'; letter++)
*letter = tolower(*letter);
@@ -472,6 +479,7 @@ symtable_dump(FILE *ofile, FILE *dfile)
DBT key;
DBT data;
int flag;
+ int reg_count = 0, reg_used = 0;
u_int i;
if (symtable == NULL)
@@ -541,6 +549,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
int num_entries;
num_entries = 0;
+ reg_count++;
+ if (curnode->symbol->count == 1)
+ break;
fields = &curnode->symbol->info.rinfo->fields;
SLIST_FOREACH(fieldnode, fields, links) {
if (num_entries == 0)
@@ -553,11 +564,14 @@ symtable_dump(FILE *ofile, FILE *dfile)
}
aic_print_reg_dump_end(ofile, dfile,
curnode, num_entries);
+ reg_used++;
}
default:
break;
}
}
+ fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
+ reg_used, reg_count);
/* Fold in the masks and bits */
while (SLIST_FIRST(&masks) != NULL) {
@@ -646,7 +660,6 @@ symtable_dump(FILE *ofile, FILE *dfile)
free(curnode);
}
-
fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index afc22e8b4903..05190c1a2fb7 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -128,6 +128,7 @@ typedef struct expression_info {
typedef struct symbol {
char *name;
symtype type;
+ int count;
union {
struct reg_info *rinfo;
struct field_info *finfo;
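
The aicasm change above threads a reference count through the symbol table: symbol_create() starts count at 1, symtable_get() bumps it on every lookup (writing the symbol back with put()), and symtable_dump() skips any register whose count is still 1 — declared but never referenced — before reporting the used/total ratio on stderr. A minimal standalone sketch of the same counting idea, with hypothetical names rather than the aicasm API:

	#include <stdio.h>

	struct sym_sketch {
		const char *name;
		int count;		/* 1 at creation; ++ on every lookup */
	};

	/* Emit debug entries only for symbols looked up at least once
	 * after creation, then report the ratio as the patch does.
	 */
	static void dump_used(const struct sym_sketch *tab, int n)
	{
		int i, used = 0;

		for (i = 0; i < n; i++) {
			if (tab[i].count == 1)
				continue;	/* never referenced */
			used++;
			printf("entry: %s\n", tab[i].name);
		}
		fprintf(stderr, "%d of %d register definitions used\n",
			used, n);
	}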
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 8be3d76656fa..a73a6bbb1b2b 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -2286,17 +2286,14 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
}
}
-static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
+static irqreturn_t ihdlr(struct Scsi_Host *shost)
{
struct scsi_cmnd *SCpnt;
unsigned int i, k, c, status, tstatus, reg;
struct mssp *spp;
struct mscp *cpp;
struct hostdata *ha = (struct hostdata *)shost->hostdata;
-
- if (shost->irq != irq)
- panic("%s: ihdlr, irq %d, shost->irq %d.\n", ha->board_name, irq,
- shost->irq);
+ int irq = shost->irq;
/* Check if this board need to be serviced */
if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
@@ -2535,7 +2532,7 @@ static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
return IRQ_NONE;
}
-static irqreturn_t do_interrupt_handler(int irq, void *shap)
+static irqreturn_t do_interrupt_handler(int dummy, void *shap)
{
struct Scsi_Host *shost;
unsigned int j;
@@ -2548,7 +2545,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap)
shost = sh[j];
spin_lock_irqsave(shost->host_lock, spin_flags);
- ret = ihdlr(irq, shost);
+ ret = ihdlr(shost);
spin_unlock_irqrestore(shost->host_lock, spin_flags);
return ret;
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bfdee5968892..a0b6d414953d 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -978,7 +978,7 @@ static int esp_check_spur_intr(struct esp *esp)
*/
if (!esp->ops->dma_error(esp)) {
printk(KERN_ERR PFX "esp%d: Spurious irq, "
- "sreg=%x.\n",
+ "sreg=%02x.\n",
esp->host->unique_id, esp->sreg);
return -1;
}
@@ -1447,6 +1447,9 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
if (offset > 15)
goto do_reject;
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+ offset = 0;
+
if (offset) {
int rounded_up, one_clock;
@@ -1697,7 +1700,12 @@ again:
else
ent->flags &= ~ESP_CMD_FLAG_WRITE;
- dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+ if (esp->ops->dma_length_limit)
+ dma_len = esp->ops->dma_length_limit(esp, dma_addr,
+ dma_len);
+ else
+ dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+
esp->data_dma_len = dma_len;
if (!dma_len) {
@@ -1761,7 +1769,6 @@ again:
esp_advance_dma(esp, ent, cmd, bytes_sent);
esp_event(esp, ESP_EVENT_CHECK_PHASE);
goto again;
- break;
}
case ESP_EVENT_STATUS: {
@@ -2235,7 +2242,7 @@ static void esp_bootup_reset(struct esp *esp)
static void esp_set_clock_params(struct esp *esp)
{
- int fmhz;
+ int fhz;
u8 ccf;
/* This is getting messy but it has to be done correctly or else
@@ -2270,9 +2277,9 @@ static void esp_set_clock_params(struct esp *esp)
* This entails the smallest and largest sync period we could ever
* handle on this ESP.
*/
- fmhz = esp->cfreq;
+ fhz = esp->cfreq;
- ccf = ((fmhz / 1000000) + 4) / 5;
+ ccf = ((fhz / 1000000) + 4) / 5;
if (ccf == 1)
ccf = 2;
@@ -2281,16 +2288,16 @@ static void esp_set_clock_params(struct esp *esp)
* been unable to find the clock-frequency PROM property. All
* other machines provide useful values it seems.
*/
- if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
- fmhz = 20000000;
+ if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
+ fhz = 20000000;
ccf = 4;
}
esp->cfact = (ccf == 8 ? 0 : ccf);
- esp->cfreq = fmhz;
- esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
+ esp->cfreq = fhz;
+ esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
esp->ctick = ESP_TICK(ccf, esp->ccycle);
- esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
+ esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
esp->sync_defp = SYNC_DEFP_SLOW;
}
@@ -2382,6 +2389,12 @@ static int esp_slave_configure(struct scsi_device *dev)
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
+ /* Bypass async domain validation */
+ dev->ppr = 0;
+ dev->sdtr = 0;
+ }
+
goal_tags = 0;
if (dev->tagged_supported) {
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index d5576d54ce76..bb43a1388188 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -224,7 +224,7 @@
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
-#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
+#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000))
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
@@ -240,9 +240,9 @@ struct esp_cmd_priv {
int num_sg;
} u;
- unsigned int cur_residue;
+ int cur_residue;
struct scatterlist *cur_sg;
- unsigned int tot_residue;
+ int tot_residue;
};
#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
@@ -368,6 +368,12 @@ struct esp_driver_ops {
*/
int (*irq_pending)(struct esp *esp);
+ /* Return the maximum allowable size of a DMA transfer for a
+ * given buffer.
+ */
+ u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
+ u32 dma_len);
+
/* Reset the DMA engine entirely. On return, ESP interrupts
* should be enabled. Often the interrupt enabling is
* controlled in the DMA engine.
@@ -471,6 +477,7 @@ struct esp {
#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
+#define ESP_FLAG_DISABLE_SYNC 0x00000020
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
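
The new esp_driver_ops.dma_length_limit hook is optional: the esp_scsi.c hunk above calls it only when the pointer is non-NULL and otherwise falls back to the generic esp_dma_length_limit(). As a hedged illustration — "myesp" is a hypothetical front-end, not part of this patch — a DMA engine that cannot cross 64 KiB boundaries might supply:

	/* Sketch: trim each segment so it stays inside the current
	 * 64 KiB region and within the chip's 16-bit transfer counter.
	 */
	static u32 myesp_dma_length_limit(struct esp *esp, u32 dma_addr,
					  u32 dma_len)
	{
		u32 avail = 0x10000 - (dma_addr & 0xFFFF);

		if (dma_len > avail)
			dma_len = avail;
		return dma_len > 0xFFFF ? 0xFFFF : dma_len;
	}

mac_esp below uses the simpler form, merely clamping dma_len to 0xFFFF.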
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c264a8c5f01e..3690360d7a79 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -199,9 +199,13 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
if (!shost->can_queue) {
printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
sht->name);
- goto out;
+ goto fail;
}
+ error = scsi_setup_command_freelist(shost);
+ if (error)
+ goto fail;
+
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -255,6 +259,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
out_del_gendev:
device_del(&shost->shost_gendev);
out:
+ scsi_destroy_command_freelist(shost);
+ fail:
return error;
}
EXPORT_SYMBOL(scsi_add_host);
@@ -284,6 +290,11 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
+struct device_type scsi_host_type = {
+ .name = "scsi_host",
+ .release = scsi_host_dev_release,
+};
+
/**
* scsi_host_alloc - register a scsi host adapter instance.
* @sht: pointer to scsi host template
@@ -376,33 +387,31 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- rval = scsi_setup_command_freelist(shost);
- if (rval)
- goto fail_kfree;
-
device_initialize(&shost->shost_gendev);
snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
shost->host_no);
- shost->shost_gendev.release = scsi_host_dev_release;
+#ifndef CONFIG_SYSFS_DEPRECATED
+ shost->shost_gendev.bus = &scsi_bus_type;
+#endif
+ shost->shost_gendev.type = &scsi_host_type;
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
shost->host_no);
+ shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
if (IS_ERR(shost->ehandler)) {
rval = PTR_ERR(shost->ehandler);
- goto fail_destroy_freelist;
+ goto fail_kfree;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
- fail_destroy_freelist:
- scsi_destroy_command_freelist(shost);
fail_kfree:
kfree(shost);
return NULL;
@@ -496,7 +505,7 @@ void scsi_exit_hosts(void)
int scsi_is_host_device(const struct device *dev)
{
- return dev->release == scsi_host_dev_release;
+ return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);
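
The hosts.c change above also swaps the "is this a host device?" test from comparing dev->release against a known callback to comparing a struct device_type tag assigned at allocation time; scsi_scan.c makes the same conversion for targets further down. Pointer-comparing a type object is the sturdier idiom, since a release callback need not stay unique. The general shape of the pattern, with hypothetical names:

	static void example_dev_release(struct device *dev)
	{
		/* free the structure embedding this device */
	}

	static struct device_type example_type = {
		.name    = "example",
		.release = example_dev_release,
	};

	/* Tag the device when it is set up ... */
	static void example_device_init(struct device *dev)
	{
		device_initialize(dev);
		dev->type = &example_type;
	}

	/* ... and identify it later by pointer comparison, exactly as
	 * scsi_is_host_device() now does.
	 */
	static int is_example_device(const struct device *dev)
	{
		return dev->type == &example_type;
	}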
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 3638fa808ded..32553639aded 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -258,8 +258,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
rq->errors++;
@@ -410,9 +409,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
idescsi_end_request (drive, 1, 0);
return ide_stopped;
}
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
@@ -485,7 +484,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
"issuing a packet command\n");
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
return ide_started;
} else {
/* Issue the packet command */
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return idescsi_transfer_pc(drive);
}
}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 5d231015bb20..b2d481dd3750 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -217,11 +217,15 @@ static int __devexit esp_jazz_remove(struct platform_device *dev)
return 0;
}
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jazz_esp");
+
static struct platform_driver esp_jazz_driver = {
.probe = esp_jazz_probe,
.remove = __devexit_p(esp_jazz_remove),
.driver = {
.name = "jazz_esp",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a9fbb3f88659..960baaf11fb1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -182,8 +182,8 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
static ssize_t
-lpfc_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -936,7 +936,7 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1666,7 +1666,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_fwrev,
&dev_attr_hdw,
&dev_attr_option_rom_version,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_temp_sensor,
@@ -1714,7 +1714,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_info,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_log_verbose,
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644
index 000000000000..cd37bd69a115
--- /dev/null
+++ b/drivers/scsi/mac_esp.c
@@ -0,0 +1,657 @@
+/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
+ *
+ * Adapted from jazz_esp.c and the old mac_esp.c.
+ *
+ * The pseudo DMA algorithm is based on the one used in NetBSD.
+ * See sys/arch/mac68k/obio/esp.c for some background information.
+ *
+ * Copyright (C) 2007-2008 Finn Thain
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nubus.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include <asm/macints.h>
+#include <asm/macintosh.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "mac_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "Sept 15, 2007"
+
+#define MAC_ESP_IO_BASE 0x50F00000
+#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
+#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
+#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
+#define MAC_ESP_REGS_SPACING 0x402
+#define MAC_ESP_PDMA_REG 0xF9800024
+#define MAC_ESP_PDMA_REG_SPACING 0x4
+#define MAC_ESP_PDMA_IO_OFFSET 0x100
+
+#define esp_read8(REG) mac_esp_read8(esp, REG)
+#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
+
+struct mac_esp_priv {
+ struct esp *esp;
+ void __iomem *pdma_regs;
+ void __iomem *pdma_io;
+ int error;
+};
+static struct platform_device *internal_esp, *external_esp;
+
+#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
+ platform_get_drvdata((struct platform_device *) \
+ (esp->dev)))
+
+static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ nubus_writeb(val, esp->regs + reg * 16);
+}
+
+static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return nubus_readb(esp->regs + reg * 16);
+}
+
+/* For pseudo DMA and PIO we need the virtual address
+ * so this address mapping is the identity mapping.
+ */
+
+static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return (dma_addr_t)buf;
+}
+
+static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ int i;
+
+ for (i = 0; i < num_sg; i++)
+ sg[i].dma_address = (u32)sg_virt(&sg[i]);
+ return num_sg;
+}
+
+static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_reset_dma(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_drain(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_invalidate(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static int mac_esp_dma_error(struct esp *esp)
+{
+ return MAC_ESP_GET_PRIV(esp)->error;
+}
+
+static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
+ return 0;
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+static inline int mac_esp_wait_for_dreq(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (mep->pdma_regs == NULL) {
+ if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+ return 0;
+ } else {
+ if (nubus_readl(mep->pdma_regs) & 0x200)
+ return 0;
+ }
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+#define MAC_ESP_PDMA_LOOP(operands) \
+ asm volatile ( \
+ " tstw %2 \n" \
+ " jbeq 20f \n" \
+ "1: movew " operands " \n" \
+ "2: movew " operands " \n" \
+ "3: movew " operands " \n" \
+ "4: movew " operands " \n" \
+ "5: movew " operands " \n" \
+ "6: movew " operands " \n" \
+ "7: movew " operands " \n" \
+ "8: movew " operands " \n" \
+ "9: movew " operands " \n" \
+ "10: movew " operands " \n" \
+ "11: movew " operands " \n" \
+ "12: movew " operands " \n" \
+ "13: movew " operands " \n" \
+ "14: movew " operands " \n" \
+ "15: movew " operands " \n" \
+ "16: movew " operands " \n" \
+ " subqw #1,%2 \n" \
+ " jbne 1b \n" \
+ "20: tstw %3 \n" \
+ " jbeq 30f \n" \
+ "21: movew " operands " \n" \
+ " subqw #1,%3 \n" \
+ " jbne 21b \n" \
+ "30: tstw %4 \n" \
+ " jbeq 40f \n" \
+ "31: moveb " operands " \n" \
+ "32: nop \n" \
+ "40: \n" \
+ " \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .long 1b,40b \n" \
+ " .long 2b,40b \n" \
+ " .long 3b,40b \n" \
+ " .long 4b,40b \n" \
+ " .long 5b,40b \n" \
+ " .long 6b,40b \n" \
+ " .long 7b,40b \n" \
+ " .long 8b,40b \n" \
+ " .long 9b,40b \n" \
+ " .long 10b,40b \n" \
+ " .long 11b,40b \n" \
+ " .long 12b,40b \n" \
+ " .long 13b,40b \n" \
+ " .long 14b,40b \n" \
+ " .long 15b,40b \n" \
+ " .long 16b,40b \n" \
+ " .long 21b,40b \n" \
+ " .long 31b,40b \n" \
+ " .long 32b,40b \n" \
+ " .previous \n" \
+ : "+a" (addr) \
+ : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
+
+static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ mep->error = 0;
+
+ if (!write)
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
+ esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
+
+ scsi_esp_cmd(esp, cmd);
+
+ do {
+ unsigned int count32 = esp_count >> 5;
+ unsigned int count2 = (esp_count & 0x1F) >> 1;
+ unsigned int start_addr = addr;
+
+ if (mac_esp_wait_for_dreq(esp))
+ break;
+
+ if (write) {
+ MAC_ESP_PDMA_LOOP("%1@,%0@+");
+
+ esp_count -= addr - start_addr;
+ } else {
+ unsigned int n;
+
+ MAC_ESP_PDMA_LOOP("%0@+,%1@");
+
+ if (mac_esp_wait_for_empty_fifo(esp))
+ break;
+
+ n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
+ addr = start_addr + esp_count - n;
+ esp_count = n;
+ }
+ } while (esp_count);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline int mac_esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ return 1;
+}
+
+static inline int mac_esp_wait_for_intr(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+ return 1;
+}
+
+#define MAC_ESP_PIO_LOOP(operands, reg1) \
+ asm volatile ( \
+ "1: moveb " operands " \n" \
+ " subqw #1,%1 \n" \
+ " jbne 1b \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_PIO_FILL(operands, reg1) \
+ asm volatile ( \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " subqw #8,%1 \n" \
+ " subqw #8,%1 \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_FIFO_SIZE 16
+
+static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ unsigned long flags;
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ u8 *fifo = esp->regs + ESP_FDATA * 16;
+
+ local_irq_save(flags);
+
+ cmd &= ~ESP_CMD_DMA;
+ mep->error = 0;
+
+ if (write) {
+ scsi_esp_cmd(esp, cmd);
+
+ if (!mac_esp_wait_for_intr(esp)) {
+ if (mac_esp_wait_for_fifo(esp))
+ esp_count = 0;
+ } else {
+ esp_count = 0;
+ }
+ } else {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (esp_count >= MAC_ESP_FIFO_SIZE)
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ else
+ MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
+
+ scsi_esp_cmd(esp, cmd);
+ }
+
+ while (esp_count) {
+ unsigned int n;
+
+ if (mac_esp_wait_for_intr(esp)) {
+ mep->error = 1;
+ break;
+ }
+
+ if (esp->sreg & ESP_STAT_SPAM) {
+ printk(KERN_ERR PFX "gross error\n");
+ mep->error = 1;
+ break;
+ }
+
+ n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (write) {
+ if (n > esp_count)
+ n = esp_count;
+ esp_count -= n;
+
+ MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+ if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
+ break;
+
+ if (esp_count) {
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ESP_INTR_DC)
+ break;
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ } else {
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ESP_INTR_DC)
+ break;
+
+ n = MAC_ESP_FIFO_SIZE - n;
+ if (n > esp_count)
+ n = esp_count;
+
+ if (n == MAC_ESP_FIFO_SIZE) {
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ } else {
+ esp_count -= n;
+ MAC_ESP_PIO_LOOP("%0@+,%2@", n);
+ }
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static int mac_esp_irq_pending(struct esp *esp)
+{
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+ return 0;
+}
+
+static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+}
+
+static struct esp_driver_ops mac_esp_ops = {
+ .esp_write8 = mac_esp_write8,
+ .esp_read8 = mac_esp_read8,
+ .map_single = mac_esp_map_single,
+ .map_sg = mac_esp_map_sg,
+ .unmap_single = mac_esp_unmap_single,
+ .unmap_sg = mac_esp_unmap_sg,
+ .irq_pending = mac_esp_irq_pending,
+ .dma_length_limit = mac_esp_dma_length_limit,
+ .reset_dma = mac_esp_reset_dma,
+ .dma_drain = mac_esp_dma_drain,
+ .dma_invalidate = mac_esp_dma_invalidate,
+ .send_dma_cmd = mac_esp_send_pdma_cmd,
+ .dma_error = mac_esp_dma_error,
+};
+
+static int __devinit esp_mac_probe(struct platform_device *dev)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ int err;
+ int chips_present;
+ struct mac_esp_priv *mep;
+
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ switch (macintosh_config->scsi_type) {
+ case MAC_SCSI_QUADRA:
+ case MAC_SCSI_QUADRA3:
+ chips_present = 1;
+ break;
+ case MAC_SCSI_QUADRA2:
+ if ((macintosh_config->ident == MAC_MODEL_Q900) ||
+ (macintosh_config->ident == MAC_MODEL_Q950))
+ chips_present = 2;
+ else
+ chips_present = 1;
+ break;
+ default:
+ chips_present = 0;
+ }
+
+ if (dev->id + 1 > chips_present)
+ return -ENODEV;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ host->use_clustering = DISABLE_CLUSTERING;
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = dev;
+
+ esp->command_block = kzalloc(16, GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unlink;
+ esp->command_block_dma = (dma_addr_t)esp->command_block;
+
+ esp->scsi_id = 7;
+ host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = 1 << esp->scsi_id;
+
+ mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
+ if (!mep)
+ goto fail_free_command_block;
+ mep->esp = esp;
+ platform_set_drvdata(dev, mep);
+
+ switch (macintosh_config->scsi_type) {
+ case MAC_SCSI_QUADRA:
+ esp->cfreq = 16500000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = NULL;
+ break;
+ case MAC_SCSI_QUADRA2:
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
+ dev->id * MAC_ESP_REGS_SPACING);
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
+ dev->id * MAC_ESP_PDMA_REG_SPACING);
+ nubus_writel(0x1d1, mep->pdma_regs);
+ break;
+ case MAC_SCSI_QUADRA3:
+ /* These Quadras have a real DMA controller (the PSC), but we
+ * don't know how to drive it so we must use PIO instead.
+ */
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
+ mep->pdma_io = NULL;
+ mep->pdma_regs = NULL;
+ break;
+ }
+
+ esp->ops = &mac_esp_ops;
+ if (mep->pdma_io == NULL) {
+ printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
+ esp_write8(0, ESP_TCLOW);
+ esp_write8(0, ESP_TCMED);
+ esp->flags = ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ } else {
+ printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
+ }
+
+ host->irq = IRQ_MAC_SCSI;
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
+ esp);
+ if (err < 0)
+ goto fail_free_priv;
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_free_priv:
+ kfree(mep);
+fail_free_command_block:
+ kfree(esp->command_block);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int __devexit esp_mac_remove(struct platform_device *dev)
+{
+ struct mac_esp_priv *mep = platform_get_drvdata(dev);
+ struct esp *esp = mep->esp;
+ unsigned int irq = esp->host->irq;
+
+ scsi_esp_unregister(esp);
+
+ free_irq(irq, esp);
+
+ kfree(mep);
+
+ kfree(esp->command_block);
+
+ scsi_host_put(esp->host);
+
+ return 0;
+}
+
+static struct platform_driver esp_mac_driver = {
+ .probe = esp_mac_probe,
+ .remove = __devexit_p(esp_mac_remove),
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+static int __init mac_esp_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&esp_mac_driver);
+ if (err)
+ return err;
+
+ internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
+ if (internal_esp && platform_device_add(internal_esp)) {
+ platform_device_put(internal_esp);
+ internal_esp = NULL;
+ }
+
+ external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
+ if (external_esp && platform_device_add(external_esp)) {
+ platform_device_put(external_esp);
+ external_esp = NULL;
+ }
+
+ if (internal_esp || external_esp) {
+ return 0;
+ } else {
+ platform_driver_unregister(&esp_mac_driver);
+ return -ENOMEM;
+ }
+}
+
+static void __exit mac_esp_exit(void)
+{
+ platform_driver_unregister(&esp_mac_driver);
+
+ if (internal_esp) {
+ platform_device_unregister(internal_esp);
+ internal_esp = NULL;
+ }
+ if (external_esp) {
+ platform_device_unregister(external_esp);
+ external_esp = NULL;
+ }
+}
+
+MODULE_DESCRIPTION("Mac ESP SCSI driver");
+MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
+MODULE_LICENSE("GPLv2");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(mac_esp_init);
+module_exit(mac_esp_exit);
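
Two details of the new mac_esp driver deserve a note. Because pseudo DMA is CPU-driven, the map_single()/map_sg() callbacks above are identity mappings — the "DMA address" is simply the kernel virtual address the movew loops dereference — and the __ex_table entries route any bus fault inside those loops to label 40, so a wedged target aborts the burst instead of oopsing. On reads, the driver then resynchronizes from the chip's 16-bit transfer counter; a standalone sketch of that residual arithmetic (plain C, no hardware access):

	/* After a PDMA read burst, TCMED:TCLOW holds how many bytes the
	 * ESP still expects. Rewind the host pointer past what was
	 * actually consumed and loop on the remainder, as
	 * mac_esp_send_pdma_cmd() does.
	 */
	static void pdma_resync(unsigned long *addr, unsigned int *esp_count,
				unsigned long start_addr,
				unsigned int tcmed, unsigned int tclow)
	{
		unsigned int n = (tcmed << 8) + tclow;	/* outstanding */

		*addr = start_addr + *esp_count - n;
		*esp_count = n;
	}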
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d61df036910c..287690853caf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -609,8 +609,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-qla2x00_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
int len = 0;
@@ -814,7 +814,7 @@ static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store);
@@ -838,7 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_model_name,
&dev_attr_model_desc,
&dev_attr_pci_info,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_zio,
&dev_attr_zio_timer,
&dev_attr_beacon,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9d12d9f26209..cbef785765cf 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -38,78 +38,38 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
}
static int
-qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
- uint32_t cram_size, uint32_t *ext_mem, void **nxt)
+qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval;
- uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
- uint16_t mb[4];
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
rval = QLA_SUCCESS;
- risc_address = ext_mem_cnt = 0;
- memset(mb, 0, sizeof(mb));
+ mb0 = 0;
- /* Code RAM. */
- risc_address = 0x20000;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
+ WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
+ dwords = GID_LIST_SIZE / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
-
- /* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* External Memory. */
- risc_address = 0x100000;
- ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
@@ -123,9 +83,7 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb0 = RD_REG_WORD(&reg->mailbox0);
WRT_REG_DWORD(&reg->hccr,
HCCRX_CLR_RISC_INT);
@@ -141,17 +99,34 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
}
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = swab32(dump[idx]);
} else {
rval = QLA_FUNCTION_FAILED;
}
}
- *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL;
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
return rval;
}
+static int
+qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+ uint32_t cram_size, void **nxt)
+{
+ int rval;
+
+ /* Code RAM. */
+ rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ /* External Memory. */
+ return qla24xx_dump_ram(ha, 0x100000, *nxt,
+ ha->fw_memory_size - 0x100000 + 1, nxt);
+}
+
static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
uint32_t count, uint32_t *buf)
@@ -239,6 +214,90 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
return rval;
}
+static int
+qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
+ uint16_t ram_words, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, words, idx;
+ uint16_t mb0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint16_t *dump = (uint16_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ words = GID_LIST_SIZE / 2;
+ for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
+ cnt += words, addr += words) {
+ if (cnt + words > ram_words)
+ words = ram_words - cnt;
+
+ WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
+ WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
+
+ WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
+ WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
+
+ WRT_MAILBOX_REG(ha, reg, 4, words);
+ WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (stat & HSR_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ /* Release mailbox registers. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ } else if (stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ }
+
+ /* clear this intr; it wasn't a mailbox intr */
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < words; idx++)
+ ram[cnt + idx] = swab16(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+ return rval;
+}
+
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
uint16_t *buf)
@@ -258,19 +317,14 @@ void
qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
int rval;
- uint32_t cnt, timer;
- uint32_t risc_address;
- uint16_t mb0, mb2;
+ uint32_t cnt;
- uint32_t stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2300_fw_dump *fw;
- uint32_t data_ram_cnt;
+ void *nxt;
- risc_address = data_ram_cnt = 0;
- mb0 = mb2 = 0;
flags = 0;
if (!hardware_locked)
@@ -388,185 +442,23 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
}
}
- if (rval == QLA_SUCCESS) {
- /* Get RISC SRAM. */
- risc_address = 0x800;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
-
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->risc_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* Get stack SRAM. */
- risc_address = 0x10000;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
- WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
-
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->stack_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* Get data SRAM. */
- risc_address = 0x11000;
- data_ram_cnt = ha->fw_memory_size - risc_address + 1;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
- WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
+ /* Get RISC SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
+ sizeof(fw->risc_ram) / 2, &nxt);
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
+ /* Get stack SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
+ sizeof(fw->stack_ram) / 2, &nxt);
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->data_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
+ /* Get data SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
+ ha->fw_memory_size - 0x11000 + 1, &nxt);
if (rval == QLA_SUCCESS)
- qla2xxx_copy_queues(ha, &fw->data_ram[cnt]);
+ qla2xxx_copy_queues(ha, nxt);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
@@ -1010,7 +902,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
goto qla24xx_fw_dump_failed_0;
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
- fw->ext_mem, &nxt);
+ &nxt);
if (rval != QLA_SUCCESS)
goto qla24xx_fw_dump_failed_0;
@@ -1318,7 +1210,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
goto qla25xx_fw_dump_failed_0;
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
- fw->ext_mem, &nxt);
+ &nxt);
if (rval != QLA_SUCCESS)
goto qla25xx_fw_dump_failed_0;
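
The qla_dbg.c rewrite above replaces word-at-a-time MBC_READ_RAM_EXTENDED mailbox round trips with MBC_DUMP_RISC_RAM_EXTENDED, letting the firmware DMA whole chunks into the already-allocated GID list buffer, which the host then byte-swaps (swab32/swab16) into the dump array. A sketch of the chunk walk shared by qla24xx_dump_ram() and qla2xxx_dump_ram():

	/* Walk ram_words of firmware RAM in bounce-buffer-sized chunks,
	 * trimming the final chunk so the copy never overruns the dump.
	 */
	static void dump_in_chunks(unsigned int ram_words,
				   unsigned int buf_words)
	{
		unsigned int cnt, words = buf_words;

		for (cnt = 0; cnt < ram_words; cnt += words) {
			if (cnt + words > ram_words)
				words = ram_words - cnt;  /* partial tail */
			/* issue one dump request for 'words' words at
			 * offset cnt, then swab the bounce buffer into
			 * dump[cnt .. cnt + words)
			 */
		}
	}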
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 078f2a15f40b..cf194517400d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1036,22 +1036,6 @@ struct mid_db_entry_24xx {
uint8_t reserved_1;
};
- /*
- * Virtual Fabric ID type definition.
- */
-typedef struct vf_id {
- uint16_t id : 12;
- uint16_t priority : 4;
-} vf_id_t;
-
-/*
- * Virtual Fabric HopCt type definition.
- */
-typedef struct vf_hopct {
- uint16_t reserved : 8;
- uint16_t hopct : 8;
-} vf_hopct_t;
-
/*
* Virtual Port Control IOCB
*/
@@ -1082,10 +1066,10 @@ struct vp_ctrl_entry_24xx {
uint8_t vp_idx_map[16];
uint16_t flags;
- struct vf_id id;
+ uint16_t id;
uint16_t reserved_4;
- struct vf_hopct hopct;
- uint8_t reserved_5[8];
+ uint16_t hopct;
+ uint8_t reserved_5[24];
};
/*
@@ -1132,9 +1116,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- struct vf_id id;
+ uint16_t id;
uint16_t reserved_4;
- struct vf_hopct hopct;
+ uint16_t hopct;
uint8_t reserved_5;
};
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 76eb4fecce65..f8827068d30f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -152,10 +152,6 @@ extern int
qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
extern int
-qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
- uint32_t);
-
-extern int
qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 750d7ef83aae..4cb80b476c85 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1583,8 +1583,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
max_frame_size = IS_FWI2_CAPABLE(ha) ?
- (uint32_t) icb24->frame_payload_size:
- (uint32_t) ha->init_cb->frame_payload_size;
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 01e26087c1dd..bbbc5a632a1d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3645,7 +3645,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = __constant_cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = cpu_to_le16(nv->login_timeout);
+ icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 285479b62d8f..5d9a64a7879b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -409,6 +409,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
}
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
ha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
@@ -454,8 +455,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
- if (ql2xfdmienable)
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
break;
@@ -511,6 +510,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
}
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
ha->flags.gpsc_supported = 1;
ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7d0a8a4c7719..210060420809 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -681,7 +681,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-int
+static int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
@@ -784,7 +784,6 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
ha->host_no, rval));
} else {
- sp->flags |= SRB_ABORT_PENDING;
DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
ha->host_no));
}
@@ -1469,7 +1468,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = cpu_to_le16(ha->vp_idx);
+ lg->vp_index = ha->vp_idx;
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1724,7 +1723,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = cpu_to_le16(ha->vp_idx);
+ lg->vp_index = ha->vp_idx;
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -2210,7 +2209,6 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
- sp->flags |= SRB_ABORT_PENDING;
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2644,12 +2642,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
struct vp_rpt_id_entry_24xx *rptid_entry)
{
uint8_t vp_idx;
+ uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
scsi_qla_host_t *vha;
if (rptid_entry->entry_status != 0)
return;
- if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
- return;
if (rptid_entry->format == 0) {
DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
@@ -2659,17 +2656,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));
} else if (rptid_entry->format == 1) {
- vp_idx = LSB(rptid_entry->vp_idx);
+ vp_idx = LSB(stat);
DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
"- status %d - "
"with port id %02x%02x%02x\n",__func__,ha->host_no,
- vp_idx, MSB(rptid_entry->vp_idx),
+ vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));
if (vp_idx == 0)
return;
- if (MSB(rptid_entry->vp_idx) == 1)
+ if (MSB(stat) == 1)
return;
list_for_each_entry(vha, &ha->vp_list, vp_list)
@@ -2982,8 +2979,8 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
/* We update the firmware with only one data sequence. */
options |= VCO_END_OF_DATA;
- retry = 0;
do {
+ retry = 0;
memset(mn, 0, sizeof(*mn));
mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
mn->p.req.entry_count = 1;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b33b163b1d4..3223fd16bcfe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
-int ql2xfdmienable;
+int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registratons "
@@ -2135,7 +2135,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
kfree(ha->nvram);
}
-struct qla_work_evt *
+static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
int locked)
{
@@ -2152,7 +2152,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
return e;
}
-int
+static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
unsigned long flags;
@@ -2373,7 +2373,7 @@ qla2x00_do_dpc(void *data)
} else {
fcport->login_retry = 0;
}
- if (fcport->login_retry == 0)
+ if (fcport->login_retry == 0 && status != QLA_SUCCESS)
fcport->loop_id = FC_NO_LOOP_ID;
}
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
@@ -2599,6 +2599,10 @@ qla2x00_timer(scsi_qla_host_t *ha)
start_dpc++;
}
+ /* Process any deferred work. */
+ if (!list_empty(&ha->work_list))
+ start_dpc++;
+
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f42f17acf2cf..afeae2bfe7eb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k1"
+#define QLA2XXX_VERSION "8.02.01-k2"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f34e9376b0a..b33e72516ef8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -121,6 +121,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
+extern struct attribute_group *scsi_sysfs_shost_attr_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index ed395154a5b1..3a1c99d5c775 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -190,10 +190,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
*/
static int proc_print_scsidevice(struct device *dev, void *data)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device *sdev;
struct seq_file *s = data;
int i;
+ if (!scsi_is_sdev_device(dev))
+ goto out;
+
+ sdev = to_scsi_device(dev);
seq_printf(s,
"Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
@@ -230,6 +234,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
else
seq_printf(s, "\n");
+out:
return 0;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e67c14e31bab..fcd7455ffc39 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,21 @@ out:
return NULL;
}
+static void scsi_target_destroy(struct scsi_target *starget)
+{
+ struct device *dev = &starget->dev;
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ unsigned long flags;
+
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->hostt->target_destroy)
+ shost->hostt->target_destroy(starget);
+ list_del_init(&starget->siblings);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev);
+}
+
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
@@ -331,9 +346,14 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
+struct device_type scsi_target_type = {
+ .name = "scsi_target",
+ .release = scsi_target_dev_release,
+};
+
int scsi_is_target_device(const struct device *dev)
{
- return dev->release == scsi_target_dev_release;
+ return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);
@@ -391,14 +411,17 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
device_initialize(dev);
starget->reap_ref = 1;
dev->parent = get_device(parent);
- dev->release = scsi_target_dev_release;
sprintf(dev->bus_id, "target%d:%d:%d",
shost->host_no, channel, id);
+#ifndef CONFIG_SYSFS_DEPRECATED
+ dev->bus = &scsi_bus_type;
+#endif
+ dev->type = &scsi_target_type;
starget->id = id;
starget->channel = channel;
INIT_LIST_HEAD(&starget->siblings);
INIT_LIST_HEAD(&starget->devices);
- starget->state = STARGET_RUNNING;
+ starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
retry:
spin_lock_irqsave(shost->host_lock, flags);
@@ -411,18 +434,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
spin_unlock_irqrestore(shost->host_lock, flags);
/* allocate and add */
transport_setup_device(dev);
- error = device_add(dev);
- if (error) {
- dev_err(dev, "target device_add failed, error %d\n", error);
- spin_lock_irqsave(shost->host_lock, flags);
- list_del_init(&starget->siblings);
- spin_unlock_irqrestore(shost->host_lock, flags);
- transport_destroy_device(dev);
- put_device(parent);
- kfree(starget);
- return NULL;
- }
- transport_add_device(dev);
if (shost->hostt->target_alloc) {
error = shost->hostt->target_alloc(starget);
@@ -430,9 +441,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
- get_device(dev);
- scsi_target_reap(starget);
- put_device(dev);
+ scsi_target_destroy(starget);
return NULL;
}
}
@@ -459,18 +468,10 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
{
struct scsi_target *starget =
container_of(work, struct scsi_target, ew.work);
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
transport_remove_device(&starget->dev);
device_del(&starget->dev);
- transport_destroy_device(&starget->dev);
- spin_lock_irqsave(shost->host_lock, flags);
- if (shost->hostt->target_destroy)
- shost->hostt->target_destroy(starget);
- list_del_init(&starget->siblings);
- spin_unlock_irqrestore(shost->host_lock, flags);
- put_device(&starget->dev);
+ scsi_target_destroy(starget);
}
/**
@@ -485,21 +486,25 @@ void scsi_target_reap(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
+ enum scsi_target_state state;
+ int empty;
spin_lock_irqsave(shost->host_lock, flags);
+ state = starget->state;
+ empty = --starget->reap_ref == 0 &&
+ list_empty(&starget->devices) ? 1 : 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- BUG_ON(starget->state == STARGET_DEL);
- starget->state = STARGET_DEL;
- spin_unlock_irqrestore(shost->host_lock, flags);
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ if (!empty)
return;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- return;
+ BUG_ON(state == STARGET_DEL);
+ starget->state = STARGET_DEL;
+ if (state == STARGET_CREATED)
+ scsi_target_destroy(starget);
+ else
+ execute_in_process_context(scsi_target_reap_usercontext,
+ &starget->ew);
}
/**
@@ -1048,8 +1053,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
scsi_inq_str(vend, result, 8, 16),
scsi_inq_str(mod, result, 16, 32));
});
+
}
-
+
res = SCSI_SCAN_TARGET_PRESENT;
goto out_free_result;
}
@@ -1489,7 +1495,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
if (scsi_host_scan_allowed(shost))
scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
mutex_unlock(&shost->scan_mutex);
- transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
@@ -1570,7 +1575,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
out_reap:
/* now determine if the target has any children at all
* and if not, nuke it */
- transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67bb20ed45d2..049103f1d16f 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -21,6 +21,8 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+static struct device_type scsi_dev_type;
+
static const struct {
enum scsi_device_state value;
char *name;
@@ -249,18 +251,27 @@ shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
-static struct device_attribute *scsi_sysfs_shost_attrs[] = {
- &dev_attr_unique_id,
- &dev_attr_host_busy,
- &dev_attr_cmd_per_lun,
- &dev_attr_can_queue,
- &dev_attr_sg_tablesize,
- &dev_attr_unchecked_isa_dma,
- &dev_attr_proc_name,
- &dev_attr_scan,
- &dev_attr_hstate,
- &dev_attr_supported_mode,
- &dev_attr_active_mode,
+static struct attribute *scsi_sysfs_shost_attrs[] = {
+ &dev_attr_unique_id.attr,
+ &dev_attr_host_busy.attr,
+ &dev_attr_cmd_per_lun.attr,
+ &dev_attr_can_queue.attr,
+ &dev_attr_sg_tablesize.attr,
+ &dev_attr_unchecked_isa_dma.attr,
+ &dev_attr_proc_name.attr,
+ &dev_attr_scan.attr,
+ &dev_attr_hstate.attr,
+ &dev_attr_supported_mode.attr,
+ &dev_attr_active_mode.attr,
+ NULL
+};
+
+struct attribute_group scsi_shost_attr_group = {
+ .attrs = scsi_sysfs_shost_attrs,
+};
+
+struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+ &scsi_shost_attr_group,
NULL
};
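
Switching from an array of struct device_attribute pointers to a NULL-terminated struct attribute array lets the attributes be packaged as an attribute_group, which the driver core can create and remove as a unit when the device is registered. A minimal sketch of the pattern, with hypothetical names (example_show and friends are not part of this patch):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL				/* array must be NULL-terminated */
};

static struct attribute_group example_group = {
	.attrs = example_attrs,
};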
@@ -335,7 +346,12 @@ static struct class sdev_class = {
/* all probing is done in the individual ->probe routines */
static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
{
- struct scsi_device *sdp = to_scsi_device(dev);
+ struct scsi_device *sdp;
+
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ sdp = to_scsi_device(dev);
if (sdp->no_uld_attach)
return 0;
return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
@@ -351,10 +367,16 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int scsi_bus_suspend(struct device * dev, pm_message_t state)
{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device_driver *drv;
+ struct scsi_device *sdev;
int err;
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ drv = dev->driver;
+ sdev = to_scsi_device(dev);
+
err = scsi_device_quiesce(sdev);
if (err)
return err;
@@ -370,10 +392,16 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
static int scsi_bus_resume(struct device * dev)
{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device_driver *drv;
+ struct scsi_device *sdev;
int err = 0;
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ drv = dev->driver;
+ sdev = to_scsi_device(dev);
+
if (drv && drv->resume)
err = drv->resume(dev);
@@ -781,6 +809,27 @@ sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
return count;
}
+static int scsi_target_add(struct scsi_target *starget)
+{
+ int error;
+
+ if (starget->state != STARGET_CREATED)
+ return 0;
+
+ error = device_add(&starget->dev);
+ if (error) {
+ dev_err(&starget->dev, "target device_add failed, error %d\n", error);
+ get_device(&starget->dev);
+ scsi_target_reap(starget);
+ put_device(&starget->dev);
+ return error;
+ }
+ transport_add_device(&starget->dev);
+ starget->state = STARGET_RUNNING;
+
+ return 0;
+}
+
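
scsi_target_add() is the other half of the lifecycle change: device_add() for the target is deferred from allocation time to the first successful LUN registration, and the STARGET_CREATED check makes the call idempotent, so every scsi_sysfs_add_sdev() can invoke it unconditionally. This is also why transport_configure_device() moves out of the scan paths below: the target only reaches the transport class once it is really in sysfs. A compressed view of the call order this gives, using the names from the diff:

	/* per sdev registration */
	scsi_target_add(starget);		/* no-op unless CREATED */
	transport_configure_device(&starget->dev);
	device_add(&sdev->sdev_gendev);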
static struct device_attribute sdev_attr_queue_type_rw =
__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
sdev_store_queue_type_rw);
@@ -796,10 +845,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
struct request_queue *rq = sdev->request_queue;
+ struct scsi_target *starget = sdev->sdev_target;
if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
return error;
+ error = scsi_target_add(starget);
+ if (error)
+ return error;
+
+ transport_configure_device(&starget->dev);
error = device_add(&sdev->sdev_gendev);
if (error) {
put_device(sdev->sdev_gendev.parent);
@@ -834,7 +889,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
goto out;
}
- error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
+ error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
if (error)
sdev_printk(KERN_INFO, sdev,
@@ -971,44 +1026,6 @@ int scsi_register_interface(struct class_interface *intf)
}
EXPORT_SYMBOL(scsi_register_interface);
-
-static struct device_attribute *class_attr_overridden(
- struct device_attribute **attrs,
- struct device_attribute *attr)
-{
- int i;
-
- if (!attrs)
- return NULL;
- for (i = 0; attrs[i]; i++)
- if (!strcmp(attrs[i]->attr.name, attr->attr.name))
- return attrs[i];
- return NULL;
-}
-
-static int class_attr_add(struct device *classdev,
- struct device_attribute *attr)
-{
- struct device_attribute *base_attr;
-
- /*
- * Spare the caller from having to copy things it's not interested in.
- */
- base_attr = class_attr_overridden(scsi_sysfs_shost_attrs, attr);
- if (base_attr) {
- /* extend permissions */
- attr->attr.mode |= base_attr->attr.mode;
-
- /* override null show/store with default */
- if (!attr->show)
- attr->show = base_attr->show;
- if (!attr->store)
- attr->store = base_attr->store;
- }
-
- return device_create_file(classdev, attr);
-}
-
/**
* scsi_sysfs_add_host - add scsi host to subsystem
* @shost: scsi host struct to add to subsystem
@@ -1018,20 +1035,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
int error, i;
+ /* add host specific attributes */
if (shost->hostt->shost_attrs) {
for (i = 0; shost->hostt->shost_attrs[i]; i++) {
- error = class_attr_add(&shost->shost_dev,
- shost->hostt->shost_attrs[i]);
- if (error)
- return error;
- }
- }
-
- for (i = 0; scsi_sysfs_shost_attrs[i]; i++) {
- if (!class_attr_overridden(shost->hostt->shost_attrs,
- scsi_sysfs_shost_attrs[i])) {
error = device_create_file(&shost->shost_dev,
- scsi_sysfs_shost_attrs[i]);
+ shost->hostt->shost_attrs[i]);
if (error)
return error;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6b092a6c295d..5fd64e70029d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1961,12 +1961,17 @@ fc_timed_out(struct scsi_cmnd *scmd)
}
/*
- * Must be called with shost->host_lock held
+ * Called by fc_user_scan to locate an rport on the shost that
+ * matches the channel and target id, and invoke scsi_scan_target()
+ * on the rport.
*/
-static int fc_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, uint lun)
+static void
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
{
struct fc_rport *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(rport, &fc_host_rports(shost), peers) {
if (rport->scsi_target_id == -1)
@@ -1975,13 +1980,54 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
if (rport->port_state != FC_PORTSTATE_ONLINE)
continue;
- if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
- (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
- scsi_scan_target(&rport->dev, rport->channel,
- rport->scsi_target_id, lun, 1);
+ if ((channel == rport->channel) &&
+ (id == rport->scsi_target_id)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_scan_target(&rport->dev, channel, id, lun, 1);
+ return;
}
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Called via sysfs scan routines. Necessary, as the FC transport
+ * wants to place all target objects below the rport object. So this
+ * routine must invoke the scsi_scan_target() routine with the rport
+ * object as the parent.
+ */
+static int
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+{
+ uint chlo, chhi;
+ uint tgtlo, tgthi;
+
+ if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+ ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+ ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
+ return -EINVAL;
+
+ if (channel == SCAN_WILD_CARD) {
+ chlo = 0;
+ chhi = shost->max_channel + 1;
+ } else {
+ chlo = channel;
+ chhi = channel + 1;
+ }
+
+ if (id == SCAN_WILD_CARD) {
+ tgtlo = 0;
+ tgthi = shost->max_id;
+ } else {
+ tgtlo = id;
+ tgthi = id + 1;
+ }
+
+	for ( ; chlo < chhi; chlo++) {
+		uint tgt;
+
+		for (tgt = tgtlo; tgt < tgthi; tgt++)
+			fc_user_scan_tgt(shost, chlo, tgt, lun);
+	}
+
+
return 0;
}
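
The rewrite has two parts: fc_user_scan_tgt() takes the host lock only while walking the rport list and drops it before calling scsi_scan_target(), which may sleep; fc_user_scan() expands SCAN_WILD_CARD into half-open ranges (note the asymmetry: max_channel is an inclusive maximum, so the wildcard bound is max_channel + 1, while max_id is already exclusive). A sketch of the range expansion as a helper, assuming the same SCAN_WILD_CARD semantics; expand_range() itself is hypothetical:

static inline void expand_range(uint v, uint limit, uint *lo, uint *hi)
{
	if (v == SCAN_WILD_CARD) {
		*lo = 0;
		*hi = limit;		/* exclusive upper bound */
	} else {
		*lo = v;
		*hi = v + 1;
	}
}

The scan loop above restarts the target index for every channel; reusing a single index across channels would cover only the first channel of a dual-wildcard scan.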
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 27ec625ab771..7899e3dda9bf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -192,6 +192,16 @@ static void sas_non_host_smp_request(struct request_queue *q)
sas_smp_request(q, rphy_to_shost(rphy), rphy);
}
+static void sas_host_release(struct device *dev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct request_queue *q = sas_host->q;
+
+ if (q)
+ blk_cleanup_queue(q);
+}
+
static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
{
struct request_queue *q;
@@ -199,6 +209,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
struct device *dev;
char namebuf[BUS_ID_SIZE];
const char *name;
+ void (*release)(struct device *);
if (!to_sas_internal(shost->transportt)->f->smp_handler) {
printk("%s can't handle SMP requests\n", shost->hostt->name);
@@ -209,17 +220,19 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
q = blk_init_queue(sas_non_host_smp_request, NULL);
dev = &rphy->dev;
name = dev->bus_id;
+ release = NULL;
} else {
q = blk_init_queue(sas_host_smp_request, NULL);
dev = &shost->shost_gendev;
snprintf(namebuf, sizeof(namebuf),
"sas_host%d", shost->host_no);
name = namebuf;
+ release = sas_host_release;
}
if (!q)
return -ENOMEM;
- error = bsg_register_queue(q, dev, name);
+ error = bsg_register_queue(q, dev, name, release);
if (error) {
blk_cleanup_queue(q);
return -ENOMEM;
@@ -253,7 +266,6 @@ static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
return;
bsg_unregister_queue(q);
- blk_cleanup_queue(q);
}
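
Moving blk_cleanup_queue() out of sas_bsg_remove() and into the release callbacks ties the queue's lifetime to the last put_device() on its owner, so a bsg user that still holds a reference after unregistration can no longer touch a freed queue. The shape of the idiom, reduced to essentials (struct example and to_example() are placeholders, not this patch's types):

static void example_release(struct device *dev)
{
	struct example *e = to_example(dev);

	if (e->q)
		blk_cleanup_queue(e->q);	/* no users can remain here */
	kfree(e);
}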
/*
@@ -1301,6 +1313,9 @@ static void sas_expander_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_expander_device *edev = rphy_to_expander_device(rphy);
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
put_device(dev->parent);
kfree(edev);
}
@@ -1310,6 +1325,9 @@ static void sas_end_device_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_end_device *edev = rphy_to_end_device(rphy);
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
put_device(dev->parent);
kfree(edev);
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bc12b5d5d676..75a64a6cae8c 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -24,6 +24,7 @@
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/sysfs.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -1374,11 +1375,11 @@ static int spi_host_configure(struct transport_container *tc,
* overloads the return by setting 1<<1 if the attribute should
* be writeable */
#define TARGET_ATTRIBUTE_HELPER(name) \
- (si->f->show_##name ? 1 : 0) + \
- (si->f->set_##name ? 2 : 0)
+ (si->f->show_##name ? S_IRUGO : 0) | \
+ (si->f->set_##name ? S_IWUSR : 0)
-static int target_attribute_is_visible(struct kobject *kobj,
- struct attribute *attr, int i)
+static mode_t target_attribute_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
struct scsi_target *starget = transport_class_to_starget(cdev);
@@ -1428,7 +1429,7 @@ static int target_attribute_is_visible(struct kobject *kobj,
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(hold_mcs);
else if (attr == &dev_attr_revalidate.attr)
- return 1;
+ return S_IWUSR;
return 0;
}
@@ -1462,25 +1463,9 @@ static int spi_target_configure(struct transport_container *tc,
struct device *cdev)
{
struct kobject *kobj = &cdev->kobj;
- int i;
- struct attribute *attr;
- int rc;
-
- for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
- int j = target_attribute_group.is_visible(kobj, attr, i);
-
- /* FIXME: as well as returning -EEXIST, which we'd like
- * to ignore, sysfs also does a WARN_ON and dumps a trace,
- * which is bad, so temporarily, skip attributes that are
- * already visible (the revalidate one) */
- if (j && attr != &dev_attr_revalidate.attr)
- rc = sysfs_add_file_to_group(kobj, attr,
- target_attribute_group.name);
- /* and make the attribute writeable if we have a set
- * function */
- if ((j & 1))
- rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
- }
+
+ /* force an update based on parameters read from the device */
+ sysfs_update_group(kobj, &target_attribute_group);
return 0;
}
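
Two cooperating changes here: target_attribute_is_visible() now returns the actual mode bits (0 hides the file entirely), and spi_target_configure() calls sysfs_update_group(), which re-runs the is_visible callback and adds, removes, or chmods files to match, replacing the hand-rolled add/chmod loop and its -EEXIST workaround. A sketch of the contract, assuming a feature_supported() predicate that is not part of this patch (kernels of this era type the return as mode_t; later ones use umode_t):

static mode_t example_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int i)
{
	if (!feature_supported(kobj, attr))
		return 0;			/* file not created */
	return S_IRUGO | S_IWUSR;		/* file created read-write */
}

Calling sysfs_update_group(kobj, &group) after the device has been probed then updates every file's existence and permissions in one pass.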
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 03e359670506..31fe6051c799 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -313,7 +313,8 @@ static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
.remove = __devexit_p(sgiwd93_remove),
.driver = {
- .name = "sgiwd93"
+ .name = "sgiwd93",
+ .owner = THIS_MODULE,
}
};
@@ -333,3 +334,4 @@ module_exit(sgiwd93_module_exit);
MODULE_DESCRIPTION("SGI WD33C93 driver");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiwd93");
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 0a6b45b1b003..2bbef4c45a0d 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -53,6 +53,7 @@
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_53c710");
#define SNIRM710_CLOCK 32
@@ -136,6 +137,7 @@ static struct platform_driver snirm710_driver = {
.remove = __devexit_p(snirm710_driver_remove),
.driver = {
.name = "snirm_53c710",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a860c3a9ae99..e8db66ad0bde 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4322,7 +4322,7 @@ static void do_remove_sysfs_files(void)
static ssize_t
st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
@@ -4334,7 +4334,7 @@ DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
static ssize_t
st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
@@ -4346,7 +4346,7 @@ DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
static ssize_t
st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
char *fmt;
@@ -4361,7 +4361,7 @@ static ssize_t
st_defcompression_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
@@ -4373,7 +4373,7 @@ DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
static ssize_t
st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
struct scsi_tape *STp;
int i, j, options;
ssize_t l = 0;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 06152c7fa689..7514b3a0390e 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -294,6 +294,7 @@ static struct platform_driver esp_sun3x_driver = {
.remove = __devexit_p(esp_sun3x_remove),
.driver = {
.name = "sun3x_esp",
+ .owner = THIS_MODULE,
},
};
@@ -314,3 +315,4 @@ MODULE_VERSION(DRV_VERSION);
module_init(sun3x_esp_init);
module_exit(sun3x_esp_exit);
+MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 58d7eee4fe81..640333b1e75c 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1715,13 +1715,12 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
}
-static irqreturn_t ihdlr(int irq, unsigned int j) {
+static irqreturn_t ihdlr(unsigned int j)
+{
struct scsi_cmnd *SCpnt;
unsigned int i, k, c, status, tstatus, reg, ret;
struct mscp *spp, *cpp;
-
- if (sh[j]->irq != irq)
- panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+ int irq = sh[j]->irq;
/* Check if this board need to be serviced */
if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
@@ -1935,7 +1934,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap) {
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
spin_lock_irqsave(sh[j]->host_lock, spin_flags);
- ret = ihdlr(irq, j);
+ ret = ihdlr(j);
spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
return ret;
}
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index 2aa6bfe8fdb3..f59463601874 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -51,6 +51,7 @@ extern int kgdb_output_string (const char* s, unsigned int count);
/* #ifdef CONFIG_SERIAL_CONSOLE */ /* This seems to be a post 2.0 thing - mles */
#include <linux/console.h>
+#include <linux/jiffies.h>
/* this defines the index into rs_table for the port to use
*/
@@ -1729,7 +1730,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
- if (timeout && ((orig_jiffies + timeout) < jiffies))
+ if (timeout && (time_after(jiffies, orig_jiffies + timeout)))
break;
/* The 'tx_cur' is really the next buffer to send. We
* have to back up to the previous BD and wait for it
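
The open-coded comparison (orig_jiffies + timeout) < jiffies misbehaves when jiffies wraps around; time_after() does the comparison as a signed difference, which stays correct across the wrap. The pattern in isolation, with done() standing in for the driver's own completion test:

#include <linux/jiffies.h>

unsigned long deadline = jiffies + msecs_to_jiffies(100);

while (!done()) {
	if (time_after(jiffies, deadline))
		return -ETIMEDOUT;		/* wrap-safe timeout */
	cpu_relax();
}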
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 96a585e1cee8..ea41f2626458 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1868,6 +1868,7 @@ static int serial8250_startup(struct uart_port *port)
}
if (is_real_interrupt(up->port.irq)) {
+ unsigned char iir1;
/*
* Test for UARTs that do not reassert THRE when the
* transmitter is idle and the interrupt has already
@@ -1881,7 +1882,7 @@ static int serial8250_startup(struct uart_port *port)
wait_for_xmitr(up, UART_LSR_THRE);
serial_out_sync(up, UART_IER, UART_IER_THRI);
udelay(1); /* allow THRE to set */
- serial_in(up, UART_IIR);
+ iir1 = serial_in(up, UART_IIR);
serial_out(up, UART_IER, 0);
serial_out_sync(up, UART_IER, UART_IER_THRI);
udelay(1); /* allow a working UART time to re-assert THRE */
@@ -1894,7 +1895,7 @@ static int serial8250_startup(struct uart_port *port)
* If the interrupt is not reasserted, setup a timer to
* kick the UART on a regular basis.
*/
- if (iir & UART_IIR_NO_INT) {
+ if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
pr_debug("ttyS%d - using backup timer\n", port->line);
up->timer.function = serial8250_backup_timeout;
up->timer.data = (unsigned long)up;
@@ -2228,7 +2229,9 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
}
serial8250_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
- tty_termios_encode_baud_rate(termios, baud, baud);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
}
static void
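
The captured iir1 tightens the backup-timer heuristic in serial8250_startup(): the timer is now armed only when the first THRE test did raise an interrupt (iir1 NO_INT clear) but the second did not (iir NO_INT set), i.e. the UART demonstrably fails to re-assert THRE. Previously any port whose second read showed no interrupt got the timer, including ports that simply failed both tests. In truth-table form (NO_INT set means "no interrupt pending"):

	/* iir1 NO_INT  iir NO_INT   action
	 *   clear        set        THRE quirk present -> backup timer
	 *   clear        clear      healthy UART       -> normal irq path
	 *   set          (any)      inconclusive       -> normal irq path
	 */
	if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT))
		/* install serial8250_backup_timeout, as in the hunk above */;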
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index f97224ce59da..6e57382b9137 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -775,7 +775,7 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
* This list is ordered alphabetically by vendor then device.
* Specific entries must come before more generic entries.
*/
-static struct pci_serial_quirk pci_serial_quirks[] = {
+static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
/*
* ADDI-DATA GmbH communication cards <info@addi-data.com>
*/
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 55492fa095a2..c065a704a93a 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -96,7 +96,6 @@
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
-#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 383c4e660cd5..88e7c1d5b919 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -3582,6 +3582,8 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ lock_kernel();
+
if (clear & TIOCM_RTS)
e100_rts(info, 0);
if (clear & TIOCM_DTR)
@@ -3601,6 +3603,8 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
e100_ri_out(info, 1);
if (set & TIOCM_CD)
e100_cd_out(info, 1);
+
+ unlock_kernel();
return 0;
}
@@ -3610,6 +3614,7 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned int result;
+ lock_kernel();
result =
(!E100_RTS_GET(info) ? TIOCM_RTS : 0)
| (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
@@ -3618,6 +3623,8 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
| (!E100_CD_GET(info) ? TIOCM_CAR : 0)
| (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
+ unlock_kernel();
+
#ifdef SERIAL_DEBUG_IO
printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
info->line, result, result);
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 116211fcd36f..0dddd68b20d2 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -819,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
dz_out(dport, DZ_TCR, mask);
iob();
udelay(2);
- } while (loops--);
+ } while (--loops);
if (loops) /* Cannot send otherwise. */
dz_out(dport, DZ_TDR, ch);
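
The predecrement matters because of the test that follows the loop: with while (loops--) a full timeout leaves loops at -1, so if (loops) looks like success and the character is sent anyway; with while (--loops) a timeout leaves loops at exactly 0, and loops is nonzero only when the body (its early-exit test is outside this hunk) broke out with the transmitter ready. Reduced to the bare idiom, with ready() and send_char() standing in for the driver's real poll and output:

int loops = 10000;

do {
	if (ready())
		break;			/* transmitter free: loops > 0 */
	udelay(2);
} while (--loops);

if (loops)				/* 0 here means we timed out */
	send_char(ch);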
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index a9ca03ead3e5..977ce820ce30 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -329,13 +329,15 @@ EXPORT_SYMBOL(uart_update_timeout);
* If it's still invalid, we try 9600 baud.
*
* Update the @termios structure to reflect the baud rate
- * we're actually going to be using.
+ * we're actually going to be using. Don't do this for the case
+ * where B0 is requested ("hang up").
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
struct ktermios *old, unsigned int min, unsigned int max)
{
unsigned int try, baud, altbaud = 38400;
+ int hung_up = 0;
upf_t flags = port->flags & UPF_SPD_MASK;
if (flags == UPF_SPD_HI)
@@ -360,8 +362,10 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
/*
* Special case: B0 rate.
*/
- if (baud == 0)
+ if (baud == 0) {
+ hung_up = 1;
baud = 9600;
+ }
if (baud >= min && baud <= max)
return baud;
@@ -373,7 +377,9 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
termios->c_cflag &= ~CBAUD;
if (old) {
baud = tty_termios_baud_rate(old);
- tty_termios_encode_baud_rate(termios, baud, baud);
+ if (!hung_up)
+ tty_termios_encode_baud_rate(termios,
+ baud, baud);
old = NULL;
continue;
}
@@ -382,7 +388,8 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
* As a last resort, if the quotient is zero,
* default to 9600 bps
*/
- tty_termios_encode_baud_rate(termios, 9600, 9600);
+ if (!hung_up)
+ tty_termios_encode_baud_rate(termios, 9600, 9600);
}
return 0;
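
B0 is the termios encoding for "hang up": the driver drops the modem lines and runs the hardware at a placeholder rate, but the B0 setting itself must survive in the termios structure, or a subsequent read-back would show 9600 and the hang-up request would be lost. Both this hunk and the 8250 one above implement the same rule: never re-encode a baud rate when the caller's rate decoded to 0. In outline, using the real tty helpers:

unsigned int baud = tty_termios_baud_rate(termios);

if (baud == 0) {
	/* B0: hang up.  Use a safe internal rate but leave the
	 * caller's termios alone so B0 is preserved. */
	baud = 9600;
} else {
	tty_termios_encode_baud_rate(termios, baud, baud);
}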
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 98ab649c1ff9..bb6ce6bba32f 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2004-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* Based on drivers/serial/8250.c, by Russell King.
*
@@ -840,6 +840,19 @@ static int __devinit siu_console_init(void)
console_initcall(siu_console_init);
+void __init vr41xx_siu_early_setup(struct uart_port *port)
+{
+ if (port->type == PORT_UNKNOWN)
+ return;
+
+ siu_uart_ports[port->line].line = port->line;
+ siu_uart_ports[port->line].type = port->type;
+ siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
+	siu_uart_ports[port->line].mapbase = port->mapbase;
+ siu_uart_ports[port->line].ops = &siu_uart_ops;
+}
+
#define SERIAL_VR41XX_CONSOLE &siu_console
#else
#define SERIAL_VR41XX_CONSOLE NULL
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d8107890db15..fae9e8f3d092 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -5,11 +5,9 @@
# nobody's needed a slave side API yet. The master-role API is not
# fully appropriate there, so it'd need some thought to do well.
#
-menu "SPI support"
- depends on HAS_IOMEM
-
-config SPI
+menuconfig SPI
bool "SPI support"
+ depends on HAS_IOMEM
help
The "Serial Peripheral Interface" is a low level synchronous
protocol. Chips that support SPI can have data transfer rates
@@ -28,9 +26,11 @@ config SPI
(half duplex), SSP, SSI, and PSP. This driver framework should
work with most such devices and controllers.
+if SPI
+
config SPI_DEBUG
boolean "Debug support for SPI drivers"
- depends on SPI && DEBUG_KERNEL
+ depends on DEBUG_KERNEL
help
Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
sysfs, and debugfs support in SPI controller and protocol drivers.
@@ -245,5 +245,4 @@ config SPI_TLE62X0
# (slave support would go here)
-endmenu # "SPI support"
-
+endif # SPI
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 1749a27be066..02c8e305b14f 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -616,7 +616,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
return -ESHUTDOWN;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (!(xfer->tx_buf || xfer->rx_buf)) {
+ if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
dev_dbg(&spi->dev, "missing rx or tx buf\n");
return -EINVAL;
}
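
The extra && xfer->len term rejects a transfer only when it has no buffers and a nonzero length; a buffer-less zero-length spi_transfer is now accepted, which is the natural way for a protocol driver to splice a pure delay or chip-select boundary into a message. A hypothetical use:

struct spi_transfer delay_xfer = {
	.len		= 0,		/* moves no data */
	.delay_usecs	= 50,		/* just waits between segments */
};

spi_message_add_tail(&delay_xfer, &msg);	/* msg built elsewhere */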
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 5f00bd6500ef..d9ae111c27ae 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -151,7 +151,7 @@ static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
if (time_after(jiffies, max_jiffies)) {
printk(KERN_ERR "%s: timeout. reg=%#06x "
"mask=%#06x val=%#06x\n",
- __FUNCTION__, w, mask, val);
+ __func__, w, mask, val);
return -1;
}
c++;
@@ -437,7 +437,7 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
}
omap_uwire_configure_mode(spi->chip_select, flags);
pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
- __FUNCTION__, flags,
+ __func__, flags,
clk_get_rate(uwire->ck) / 1000,
rate / 1000);
status = 0;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 147e26a78d64..654bb58be630 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -67,8 +67,11 @@ MODULE_ALIAS("platform:pxa2xx-spi");
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
-static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+static inline u32 read_##reg(void const __iomem *p) \
+{ return __raw_readl(p + (off)); } \
+\
+static inline void write_##reg(u32 v, void __iomem *p) \
+{ __raw_writel(v, p + (off)); }
DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
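
void __iomem * is a sparse annotation marking MMIO address space: with it, passing the pointer to ordinary memory routines or dereferencing it directly warns under "make C=1", while the __raw_readl()/__raw_writel() accessors (and readl()/writel()) remain legal. The change is type-only; no generated code differs. Minimal usage, with phys_addr and size assumed:

#include <linux/io.h>

void __iomem *base = ioremap(phys_addr, size);

u32 v = __raw_readl(base + 0x04);	/* ok: io accessor */
__raw_writel(v | 1, base + 0x04);
iounmap(base);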
@@ -106,7 +109,7 @@ struct driver_data {
u32 *null_dma_buf;
/* SSP register addresses */
- void *ioaddr;
+ void __iomem *ioaddr;
u32 ssdr_physical;
/* SSP masks*/
@@ -173,7 +176,7 @@ static int flush(struct driver_data *drv_data)
{
unsigned long limit = loops_per_jiffy << 1;
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
do {
while (read_SSSR(reg) & SSSR_RNE) {
@@ -191,7 +194,7 @@ static void null_cs_control(u32 command)
static int null_writer(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
u8 n_bytes = drv_data->n_bytes;
if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
@@ -206,7 +209,7 @@ static int null_writer(struct driver_data *drv_data)
static int null_reader(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
u8 n_bytes = drv_data->n_bytes;
while ((read_SSSR(reg) & SSSR_RNE)
@@ -220,7 +223,7 @@ static int null_reader(struct driver_data *drv_data)
static int u8_writer(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
|| (drv_data->tx == drv_data->tx_end))
@@ -234,7 +237,7 @@ static int u8_writer(struct driver_data *drv_data)
static int u8_reader(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
while ((read_SSSR(reg) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
@@ -247,7 +250,7 @@ static int u8_reader(struct driver_data *drv_data)
static int u16_writer(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
|| (drv_data->tx == drv_data->tx_end))
@@ -261,7 +264,7 @@ static int u16_writer(struct driver_data *drv_data)
static int u16_reader(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
while ((read_SSSR(reg) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
@@ -274,7 +277,7 @@ static int u16_reader(struct driver_data *drv_data)
static int u32_writer(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
|| (drv_data->tx == drv_data->tx_end))
@@ -288,7 +291,7 @@ static int u32_writer(struct driver_data *drv_data)
static int u32_reader(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
while ((read_SSSR(reg) & SSSR_RNE)
&& (drv_data->rx < drv_data->rx_end)) {
@@ -412,7 +415,7 @@ static void giveback(struct driver_data *drv_data)
msg->complete(msg->context);
}
-static int wait_ssp_rx_stall(void *ioaddr)
+static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
unsigned long limit = loops_per_jiffy << 1;
@@ -432,9 +435,9 @@ static int wait_dma_channel_stop(int channel)
return limit;
}
-void dma_error_stop(struct driver_data *drv_data, const char *msg)
+static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
/* Stop and reset */
DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -456,7 +459,7 @@ void dma_error_stop(struct driver_data *drv_data, const char *msg)
static void dma_transfer_complete(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
struct spi_message *msg = drv_data->cur_msg;
/* Clear and disable interrupts on SSP and DMA channels*/
@@ -536,7 +539,7 @@ static void dma_handler(int channel, void *data)
static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
u32 irq_status;
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
irq_status = read_SSSR(reg) & drv_data->mask_sr;
if (irq_status & SSSR_ROR) {
@@ -570,7 +573,7 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
/* Stop and reset SSP */
write_SSSR(drv_data->clear_sr, reg);
@@ -588,7 +591,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
static void int_transfer_complete(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
/* Stop SSP */
write_SSSR(drv_data->clear_sr, reg);
@@ -614,7 +617,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
@@ -675,7 +678,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static irqreturn_t ssp_int(int irq, void *dev_id)
{
struct driver_data *drv_data = dev_id;
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
if (!drv_data->cur_msg) {
@@ -695,7 +698,8 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
return drv_data->transfer_handler(drv_data);
}
-int set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi,
+static int set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
u8 bits_per_word, u32 *burst_code,
u32 *threshold)
{
@@ -809,7 +813,7 @@ static void pump_transfers(unsigned long data)
struct spi_transfer *previous = NULL;
struct chip_data *chip = NULL;
struct ssp_device *ssp = drv_data->ssp;
- void *reg = drv_data->ioaddr;
+ void __iomem *reg = drv_data->ioaddr;
u32 clk_div = 0;
u8 bits = 0;
u32 speed = 0;
@@ -1338,7 +1342,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pxa2xx_spi_master *platform_info;
struct spi_master *master;
- struct driver_data *drv_data = 0;
+ struct driver_data *drv_data = NULL;
struct ssp_device *ssp;
int status = 0;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 71e881419cdd..96cc39ecb6e2 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -214,7 +214,7 @@ int spi_bitbang_setup(struct spi_device *spi)
return retval;
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+ __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
spi->bits_per_word, 2 * cs->nsecs);
/* NOTE we _need_ to call chipselect() early, ideally with adapter
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index d4ba640366b6..c730d05bfeb6 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -270,19 +270,26 @@ struct chip_data {
static void pump_messages(struct work_struct *work);
-static int flush(struct driver_data *drv_data)
+static void flush(struct driver_data *drv_data)
{
- unsigned long limit = loops_per_jiffy << 1;
void __iomem *regs = drv_data->regs;
- volatile u32 d;
+ u32 control;
dev_dbg(&drv_data->pdev->dev, "flush\n");
+
+ /* Wait for end of transaction */
do {
- while (readl(regs + SPI_INT_STATUS) & SPI_STATUS_RR)
- d = readl(regs + SPI_RXDATA);
- } while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) && limit--);
+ control = readl(regs + SPI_CONTROL);
+ } while (control & SPI_CONTROL_XCH);
+
+ /* Release chip select if requested, transfer delays are
+ handled in pump_transfers */
+ if (drv_data->cs_change)
+ drv_data->cs_control(SPI_CS_DEASSERT);
- return limit;
+ /* Disable SPI to flush FIFOs */
+ writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
+ writel(control, regs + SPI_CONTROL);
}
static void restore_state(struct driver_data *drv_data)
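
The new flush() drops the loops_per_jiffy bound and instead waits for the controller to clear SPI_CONTROL_XCH (exchange in progress), then pulses SPIEN low: per the comment in the hunk, disabling SPI on this controller resets both FIFOs, which empties the receiver without reading SPI_RXDATA word by word. The core of the idiom as a sketch, using this driver's register names (the cpu_relax() is an addition, not in the hunk):

u32 control = readl(regs + SPI_CONTROL);

while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
	cpu_relax();				/* transaction draining */

writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);	/* FIFO reset */
writel(control, regs + SPI_CONTROL);				/* re-enable */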
@@ -570,6 +577,7 @@ static void giveback(struct spi_message *message, struct driver_data *drv_data)
writel(0, regs + SPI_INT_STATUS);
writel(0, regs + SPI_DMA);
+	/* Unconditional deselect */
drv_data->cs_control(SPI_CS_DEASSERT);
message->state = NULL;
@@ -592,13 +600,10 @@ static void dma_err_handler(int channel, void *data, int errcode)
/* Disable both rx and tx dma channels */
imx_dma_disable(drv_data->rx_channel);
imx_dma_disable(drv_data->tx_channel);
-
- if (flush(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_err_handler - flush failed\n");
-
unmap_dma_buffers(drv_data);
+ flush(drv_data);
+
msg->state = ERROR_STATE;
tasklet_schedule(&drv_data->pump_transfers);
}
@@ -612,8 +617,7 @@ static void dma_tx_handler(int channel, void *data)
imx_dma_disable(channel);
/* Now waits for TX FIFO empty */
- writel(readl(drv_data->regs + SPI_INT_STATUS) | SPI_INTEN_TE,
- drv_data->regs + SPI_INT_STATUS);
+ writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
}
static irqreturn_t dma_transfer(struct driver_data *drv_data)
@@ -621,19 +625,18 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
u32 status;
struct spi_message *msg = drv_data->cur_msg;
void __iomem *regs = drv_data->regs;
- unsigned long limit;
status = readl(regs + SPI_INT_STATUS);
- if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) {
+ if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
+ == (SPI_INTEN_RO | SPI_STATUS_RO)) {
writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
+ imx_dma_disable(drv_data->tx_channel);
imx_dma_disable(drv_data->rx_channel);
unmap_dma_buffers(drv_data);
- if (flush(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer - flush failed\n");
+ flush(drv_data);
dev_warn(&drv_data->pdev->dev,
"dma_transfer - fifo overun\n");
@@ -649,20 +652,17 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
if (drv_data->rx) {
/* Wait end of transfer before read trailing data */
- limit = loops_per_jiffy << 1;
- while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) &&
- limit--);
-
- if (limit == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer - end of tx failed\n");
- else
- dev_dbg(&drv_data->pdev->dev,
- "dma_transfer - end of tx\n");
+ while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
+ cpu_relax();
imx_dma_disable(drv_data->rx_channel);
unmap_dma_buffers(drv_data);
+ /* Release chip select if requested, transfer delays are
+ handled in pump_transfers() */
+ if (drv_data->cs_change)
+ drv_data->cs_control(SPI_CS_DEASSERT);
+
/* Calculate number of trailing data and read them */
dev_dbg(&drv_data->pdev->dev,
"dma_transfer - test = 0x%08X\n",
@@ -676,19 +676,12 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
/* Write only transfer */
unmap_dma_buffers(drv_data);
- if (flush(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "dma_transfer - flush failed\n");
+ flush(drv_data);
}
/* End of transfer, update total byte transfered */
msg->actual_length += drv_data->len;
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers() */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
-
/* Move to next transfer */
msg->state = next_transfer(drv_data);
@@ -711,44 +704,43 @@ static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
status = readl(regs + SPI_INT_STATUS);
- while (status & SPI_STATUS_TH) {
+ if (status & SPI_INTEN_TE) {
+		/* TXFIFO Empty Interrupt on the last transferred word */
+ writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
dev_dbg(&drv_data->pdev->dev,
- "interrupt_wronly_transfer - status = 0x%08X\n", status);
+ "interrupt_wronly_transfer - end of tx\n");
- /* Pump data */
- if (write(drv_data)) {
- writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
- regs + SPI_INT_STATUS);
+ flush(drv_data);
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_wronly_transfer - end of tx\n");
+		/* Update total bytes transferred */
+ msg->actual_length += drv_data->len;
- if (flush(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "interrupt_wronly_transfer - "
- "flush failed\n");
+ /* Move to next transfer */
+ msg->state = next_transfer(drv_data);
- /* End of transfer, update total byte transfered */
- msg->actual_length += drv_data->len;
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
+ return IRQ_HANDLED;
+ } else {
+ while (status & SPI_STATUS_TH) {
+ dev_dbg(&drv_data->pdev->dev,
+ "interrupt_wronly_transfer - status = 0x%08X\n",
+ status);
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
+ /* Pump data */
+ if (write(drv_data)) {
+ /* End of TXFIFO writes,
+ now wait until TXFIFO is empty */
+ writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+ return IRQ_HANDLED;
+ }
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
+ status = readl(regs + SPI_INT_STATUS);
- return IRQ_HANDLED;
+ /* We did something */
+ handled = IRQ_HANDLED;
}
-
- status = readl(regs + SPI_INT_STATUS);
-
- /* We did something */
- handled = IRQ_HANDLED;
}
return handled;
@@ -758,45 +750,31 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
struct spi_message *msg = drv_data->cur_msg;
void __iomem *regs = drv_data->regs;
- u32 status;
+ u32 status, control;
irqreturn_t handled = IRQ_NONE;
unsigned long limit;
status = readl(regs + SPI_INT_STATUS);
- while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
+ if (status & SPI_INTEN_TE) {
+		/* TXFIFO Empty Interrupt on the last transferred word */
+ writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
dev_dbg(&drv_data->pdev->dev,
- "interrupt_transfer - status = 0x%08X\n", status);
-
- if (status & SPI_STATUS_RO) {
- writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
- regs + SPI_INT_STATUS);
-
- dev_warn(&drv_data->pdev->dev,
- "interrupt_transfer - fifo overun\n"
- " data not yet written = %d\n"
- " data not yet read = %d\n",
- data_to_write(drv_data),
- data_to_read(drv_data));
-
- if (flush(drv_data) == 0)
- dev_err(&drv_data->pdev->dev,
- "interrupt_transfer - flush failed\n");
-
- msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
+ "interrupt_transfer - end of tx\n");
- return IRQ_HANDLED;
- }
-
- /* Pump data */
- read(drv_data);
- if (write(drv_data)) {
- writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
- regs + SPI_INT_STATUS);
+ if (msg->state == ERROR_STATE) {
+ /* RXFIFO overrun was detected and message aborted */
+ flush(drv_data);
+ } else {
+ /* Wait for end of transaction */
+ do {
+ control = readl(regs + SPI_CONTROL);
+ } while (control & SPI_CONTROL_XCH);
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_transfer - end of tx\n");
+ /* Release chip select if requested, transfer delays are
+ handled in pump_transfers */
+ if (drv_data->cs_change)
+ drv_data->cs_control(SPI_CS_DEASSERT);
/* Read trailing bytes */
limit = loops_per_jiffy << 1;
@@ -810,27 +788,54 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
dev_dbg(&drv_data->pdev->dev,
"interrupt_transfer - end of rx\n");
- /* End of transfer, update total byte transfered */
+			/* Update total bytes transferred */
msg->actual_length += drv_data->len;
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
-
/* Move to next transfer */
msg->state = next_transfer(drv_data);
+ }
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
- return IRQ_HANDLED;
- }
+ return IRQ_HANDLED;
+ } else {
+ while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
+ dev_dbg(&drv_data->pdev->dev,
+ "interrupt_transfer - status = 0x%08X\n",
+ status);
+
+ if (status & SPI_STATUS_RO) {
+				/* RXFIFO overrun, abort message and wait
+				   until TXFIFO is empty */
+ writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+
+ dev_warn(&drv_data->pdev->dev,
+ "interrupt_transfer - fifo overun\n"
+ " data not yet written = %d\n"
+ " data not yet read = %d\n",
+ data_to_write(drv_data),
+ data_to_read(drv_data));
+
+ msg->state = ERROR_STATE;
+
+ return IRQ_HANDLED;
+ }
- status = readl(regs + SPI_INT_STATUS);
+ /* Pump data */
+ read(drv_data);
+ if (write(drv_data)) {
+ /* End of TXFIFO writes,
+ now wait until TXFIFO is empty */
+ writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+ return IRQ_HANDLED;
+ }
- /* We did something */
- handled = IRQ_HANDLED;
+ status = readl(regs + SPI_INT_STATUS);
+
+ /* We did something */
+ handled = IRQ_HANDLED;
+ }
}
return handled;
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index be15a6213205..189f706b9e4b 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -310,7 +310,7 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
return retval;
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
- __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+ __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
spi->bits_per_word, 2 * mpc83xx_spi->nsecs);
/* NOTE we _need_ to call chipselect() early, ideally with adapter
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index b7476b888197..34bfb7dd7764 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -169,7 +169,7 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
}
dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n",
- __FUNCTION__, spi->mode, spi->bits_per_word,
+ __func__, spi->mode, spi->bits_per_word,
spi->max_speed_hz);
return 0;
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index cf6aef34fe25..113a0468ffcb 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -151,13 +151,13 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
hz = (t) ? t->speed_hz : spi->max_speed_hz;
if (bits_per_word != 8) {
dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __FUNCTION__, bits_per_word);
+ __func__, bits_per_word);
return -EINVAL;
}
if (hz && xspi->speed_hz > hz) {
dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
- __FUNCTION__, hz);
+ __func__, hz);
return -EINVAL;
}
@@ -181,7 +181,7 @@ static int xilinx_spi_setup(struct spi_device *spi)
if (spi->mode & ~MODEBITS) {
dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
- __FUNCTION__, spi->mode & ~MODEBITS);
+ __func__, spi->mode & ~MODEBITS);
return -EINVAL;
}
@@ -190,7 +190,7 @@ static int xilinx_spi_setup(struct spi_device *spi)
return retval;
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+ __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
return 0;
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e3dc8f8d0c3e..a576dc261732 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -139,6 +139,30 @@ config FB_SYS_IMAGEBLIT
blitting. This is used by drivers that don't provide their own
(accelerated) version and the framebuffer is in system RAM.
+menuconfig FB_FOREIGN_ENDIAN
+ bool "Framebuffer foreign endianness support"
+ depends on FB
+ ---help---
+ This menu will let you enable support for the framebuffers with
+ non-native endianness (e.g. Little-Endian framebuffer on a
+	  Big-Endian machine). Most likely you don't have such hardware,
+ so it's safe to say "n" here.
+
+choice
+ prompt "Choice endianness support"
+ depends on FB_FOREIGN_ENDIAN
+
+config FB_BOTH_ENDIAN
+ bool "Support for Big- and Little-Endian framebuffers"
+
+config FB_BIG_ENDIAN
+ bool "Support for Big-Endian framebuffers only"
+
+config FB_LITTLE_ENDIAN
+ bool "Support for Little-Endian framebuffers only"
+
+endchoice
+
config FB_SYS_FOPS
tristate
depends on FB
@@ -149,6 +173,16 @@ config FB_DEFERRED_IO
depends on FB
default y
+config FB_METRONOME
+ tristate
+ depends on FB
+ depends on FB_DEFERRED_IO
+
+config FB_HECUBA
+ tristate
+ depends on FB
+ depends on FB_DEFERRED_IO
+
config FB_SVGALIB
tristate
depends on FB
@@ -546,7 +580,7 @@ config FB_VGA16
config FB_BF54X_LQ043
tristate "SHARP LQ043 TFT LCD (BF548 EZKIT)"
- depends on FB && (BF54x)
+ depends on FB && (BF54x) && !BF542
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -674,20 +708,18 @@ config FB_IMAC
help
This is the frame buffer device driver for the Intel-based Macintosh
-config FB_HECUBA
- tristate "Hecuba board support"
+config FB_N411
+ tristate "N411 Apollo/Hecuba devkit support"
depends on FB && X86 && MMU
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select FB_HECUBA
help
- This enables support for the Hecuba board. This driver was tested
- with an E-Ink 800x600 display and x86 SBCs through a 16 bit GPIO
- interface (8 bit data, 4 bit control). If you anticipate using
- this driver, say Y or M; otherwise say N. You must specify the
- GPIO IO address to be used for setting control and data.
+ This enables support for the Apollo display controller in its
+ Hecuba form using the n411 devkit.
config FB_HGA
tristate "Hercules mono graphics support"
@@ -1087,7 +1119,7 @@ config FB_CARILLO_RANCH
This driver supports the LE80578 (Carillo Ranch) board
config FB_INTEL
- tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G support (EXPERIMENTAL)"
+ tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
depends on FB && EXPERIMENTAL && PCI && X86
select AGP
select AGP_INTEL
@@ -1097,7 +1129,7 @@ config FB_INTEL
select FB_CFB_IMAGEBLIT
help
This driver supports the on-board graphics built in to the Intel
- 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM chipsets.
+ 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
Say Y if you have and plan to use such a board.
If you say Y here and want DDC/I2C support you must first say Y to
@@ -1779,6 +1811,16 @@ config FB_MBX_DEBUG
If unsure, say N.
+config FB_FSL_DIU
+ tristate "Freescale DIU framebuffer support"
+ depends on FB && FSL_SOC
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select PPC_LIB_RHEAP
+ ---help---
+ Framebuffer driver for the Freescale SoC DIU
+
config FB_W100
tristate "W100 frame buffer support"
depends on FB && PXA_SHARPSL
@@ -1893,19 +1935,18 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
-config FB_METRONOME
- tristate "Metronome display controller support"
+config FB_AM200EPD
+ tristate "AM-200 E-Ink EPD devkit support"
depends on FB && ARCH_PXA && MMU
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select FB_METRONOME
help
- This enables support for the Metronome display controller. Tested
- with an E-Ink 800x600 display and Gumstix Connex through an AMLCD
- interface. Please read <file:Documentation/fb/metronomefb.txt>
- for more information.
+ This enables support for the Metronome display controller used on
+ the E-Ink AM-200 EPD devkit.
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index f172b9b73314..04bca35403ff 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o
# Hardware specific drivers go first
obj-$(CONFIG_FB_AMIGA) += amifb.o c2p.o
+obj-$(CONFIG_FB_AM200EPD) += am200epd.o
obj-$(CONFIG_FB_ARC) += arcfb.o
obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
@@ -107,6 +108,7 @@ obj-$(CONFIG_FB_METRONOME) += metronomefb.o
obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
+obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
diff --git a/drivers/video/am200epd.c b/drivers/video/am200epd.c
new file mode 100644
index 000000000000..51e26c1f5e8b
--- /dev/null
+++ b/drivers/video/am200epd.c
@@ -0,0 +1,295 @@
+/*
+ * linux/drivers/video/am200epd.c -- Platform device for AM200 EPD kit
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This work was made possible by help and equipment support from E-Ink
+ * Corporation. http://support.eink.com/community
+ *
+ * This driver is written to be used with the Metronome display controller
+ * on the AM200 EPD prototype kit/development kit with an E-Ink 800x600
+ * Vizplex EPD on a Gumstix board using the Lyre interface board.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+
+#include <video/metronomefb.h>
+
+#include <asm/arch/pxa-regs.h>
+
+/* register offsets for gpio control */
+#define LED_GPIO_PIN 51
+#define STDBY_GPIO_PIN 48
+#define RST_GPIO_PIN 49
+#define RDY_GPIO_PIN 32
+#define ERR_GPIO_PIN 17
+#define PCBPWR_GPIO_PIN 16
+
+#define AF_SEL_GPIO_N 0x3
+#define GAFR0_U_OFFSET(pin) ((pin - 16) * 2)
+#define GAFR1_L_OFFSET(pin) ((pin - 32) * 2)
+#define GAFR1_U_OFFSET(pin) ((pin - 48) * 2)
+#define GPDR1_OFFSET(pin) (pin - 32)
+#define GPCR1_OFFSET(pin) (pin - 32)
+#define GPSR1_OFFSET(pin) (pin - 32)
+#define GPCR0_OFFSET(pin) (pin)
+#define GPSR0_OFFSET(pin) (pin)
+
+static void am200_set_gpio_output(int pin, int val)
+{
+ u8 index;
+
+ index = pin >> 4;
+
+ switch (index) {
+ case 1:
+ if (val)
+ GPSR0 |= (1 << GPSR0_OFFSET(pin));
+ else
+ GPCR0 |= (1 << GPCR0_OFFSET(pin));
+ break;
+ case 2:
+ break;
+ case 3:
+ if (val)
+ GPSR1 |= (1 << GPSR1_OFFSET(pin));
+ else
+ GPCR1 |= (1 << GPCR1_OFFSET(pin));
+ break;
+ default:
+ printk(KERN_ERR "unimplemented\n");
+ }
+}
+
+static void __devinit am200_init_gpio_pin(int pin, int dir)
+{
+ u8 index;
+ /* dir 0 is output, 1 is input
+ - do 2 things here:
+ - set gpio alternate function to standard gpio
+ - set gpio direction to input or output */
+
+ index = pin >> 4;
+ switch (index) {
+ case 1:
+ GAFR0_U &= ~(AF_SEL_GPIO_N << GAFR0_U_OFFSET(pin));
+
+ if (dir)
+ GPDR0 &= ~(1 << pin);
+ else
+ GPDR0 |= (1 << pin);
+ break;
+ case 2:
+ GAFR1_L &= ~(AF_SEL_GPIO_N << GAFR1_L_OFFSET(pin));
+
+ if (dir)
+ GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+ else
+ GPDR1 |= (1 << GPDR1_OFFSET(pin));
+ break;
+ case 3:
+ GAFR1_U &= ~(AF_SEL_GPIO_N << GAFR1_U_OFFSET(pin));
+
+ if (dir)
+ GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+ else
+ GPDR1 |= (1 << GPDR1_OFFSET(pin));
+ break;
+ default:
+ printk(KERN_ERR "unimplemented\n");
+ }
+}
+
+static void am200_init_gpio_regs(struct metronomefb_par *par)
+{
+ am200_init_gpio_pin(LED_GPIO_PIN, 0);
+ am200_set_gpio_output(LED_GPIO_PIN, 0);
+
+ am200_init_gpio_pin(STDBY_GPIO_PIN, 0);
+ am200_set_gpio_output(STDBY_GPIO_PIN, 0);
+
+ am200_init_gpio_pin(RST_GPIO_PIN, 0);
+ am200_set_gpio_output(RST_GPIO_PIN, 0);
+
+ am200_init_gpio_pin(RDY_GPIO_PIN, 1);
+
+ am200_init_gpio_pin(ERR_GPIO_PIN, 1);
+
+ am200_init_gpio_pin(PCBPWR_GPIO_PIN, 0);
+ am200_set_gpio_output(PCBPWR_GPIO_PIN, 0);
+}
+
+static void am200_disable_lcd_controller(struct metronomefb_par *par)
+{
+ LCSR = 0xffffffff; /* Clear LCD Status Register */
+ LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
+
+ /* we reset and just wait for things to settle */
+ msleep(200);
+}
+
+static void am200_enable_lcd_controller(struct metronomefb_par *par)
+{
+ LCSR = 0xffffffff;
+ FDADR0 = par->metromem_desc_dma;
+ LCCR0 |= LCCR0_ENB;
+}
+
+static void am200_init_lcdc_regs(struct metronomefb_par *par)
+{
+ /* here we do:
+ - disable the lcd controller
+ - setup lcd control registers
+ - setup dma descriptor
+ - reenable lcd controller
+ */
+
+ /* disable the lcd controller */
+ am200_disable_lcd_controller(par);
+
+ /* setup lcd control registers */
+ LCCR0 = LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_PAS
+ | LCCR0_QDM | LCCR0_BM | LCCR0_OUM;
+
+ LCCR1 = (par->info->var.xres/2 - 1) /* pixels per line */
+ | (27 << 10) /* hsync pulse width - 1 */
+ | (33 << 16) /* eol pixel count */
+ | (33 << 24); /* bol pixel count */
+
+ LCCR2 = (par->info->var.yres - 1) /* lines per panel */
+ | (24 << 10) /* vsync pulse width - 1 */
+ | (2 << 16) /* eof pixel count */
+ | (0 << 24); /* bof pixel count */
+
+ LCCR3 = 2 /* pixel clock divisor */
+ | (24 << 8) /* AC Bias pin freq */
+ | LCCR3_16BPP /* BPP */
+ | LCCR3_PCP; /* PCP falling edge */
+
+}
+
+static void am200_post_dma_setup(struct metronomefb_par *par)
+{
+ par->metromem_desc->mFDADR0 = par->metromem_desc_dma;
+ par->metromem_desc->mFSADR0 = par->metromem_dma;
+ par->metromem_desc->mFIDR0 = 0;
+ par->metromem_desc->mLDCMD0 = par->info->var.xres
+ * par->info->var.yres;
+ am200_enable_lcd_controller(par);
+}
+
+static void am200_free_irq(struct fb_info *info)
+{
+ free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+}
+
+static irqreturn_t am200_handle_irq(int irq, void *dev_id)
+{
+ struct fb_info *info = dev_id;
+ struct metronomefb_par *par = info->par;
+
+ wake_up_interruptible(&par->waitq);
+ return IRQ_HANDLED;
+}
+
+static int am200_setup_irq(struct fb_info *info)
+{
+ int retval;
+
+ retval = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am200_handle_irq,
+ IRQF_DISABLED, "AM200", info);
+ if (retval) {
+ printk(KERN_ERR "am200epd: request_irq failed: %d\n", retval);
+ return retval;
+ }
+
+ return set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+}
+
+static void am200_set_rst(struct metronomefb_par *par, int state)
+{
+ am200_set_gpio_output(RST_GPIO_PIN, state);
+}
+
+static void am200_set_stdby(struct metronomefb_par *par, int state)
+{
+ am200_set_gpio_output(STDBY_GPIO_PIN, state);
+}
+
+static int am200_wait_event(struct metronomefb_par *par)
+{
+ return wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+}
+
+static int am200_wait_event_intr(struct metronomefb_par *par)
+{
+ return wait_event_interruptible_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+}
+
+static struct metronome_board am200_board = {
+ .owner = THIS_MODULE,
+ .free_irq = am200_free_irq,
+ .setup_irq = am200_setup_irq,
+ .init_gpio_regs = am200_init_gpio_regs,
+ .init_lcdc_regs = am200_init_lcdc_regs,
+ .post_dma_setup = am200_post_dma_setup,
+ .set_rst = am200_set_rst,
+ .set_stdby = am200_set_stdby,
+ .met_wait_event = am200_wait_event,
+ .met_wait_event_intr = am200_wait_event_intr,
+};
+
+static struct platform_device *am200_device;
+
+static int __init am200_init(void)
+{
+ int ret;
+
+ /* request our platform-independent driver */
+ request_module("metronomefb");
+
+ am200_device = platform_device_alloc("metronomefb", -1);
+ if (!am200_device)
+ return -ENOMEM;
+
+ platform_device_add_data(am200_device, &am200_board,
+ sizeof(am200_board));
+
+ /* this _add binds metronomefb to am200. metronomefb refcounts am200 */
+ ret = platform_device_add(am200_device);
+
+ if (ret)
+ platform_device_put(am200_device);
+
+ return ret;
+}
+
+static void __exit am200_exit(void)
+{
+ platform_device_unregister(am200_device);
+}
+
+module_init(am200_init);
+module_exit(am200_exit);
+
+MODULE_DESCRIPTION("board driver for am200 metronome epd kit");
+MODULE_AUTHOR("Jaya Kumar");
+MODULE_LICENSE("GPL");
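platform_device_add_data() above copies am200_board into the new device, and
metronomefb's probe is then expected to fetch the struct back from
platform_data and drive the panel only through these hooks. A minimal sketch
of that consumer side, with illustrative names and error handling (not
metronomefb's actual code):

	static int __devinit metronomefb_probe_sketch(struct platform_device *dev)
	{
		struct metronome_board *board = dev->dev.platform_data;
		struct fb_info *info;

		if (!board || !try_module_get(board->owner))
			return -ENODEV;	/* pin the board module while in use */

		info = framebuffer_alloc(sizeof(struct metronomefb_par), &dev->dev);
		if (!info) {
			module_put(board->owner);
			return -ENOMEM;
		}

		board->init_gpio_regs(info->par);
		board->init_lcdc_regs(info->par);
		return board->setup_irq(info);
	}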
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 4c9ec3f58c52..e6492c1048bf 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -96,7 +96,7 @@
#endif
#ifdef DEBUG
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
# define DPRINTK(fmt, args...)
#endif
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index 8a1b07c74394..5001bd4ef466 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -101,7 +101,7 @@ static const struct svga_timing_regs ark_timing_regs = {
/* Module parameters */
-static char *mode = "640x480-8@60";
+static char *mode_option __devinitdata = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;
@@ -111,8 +111,10 @@ MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
-module_param(mode, charp, 0444);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0444);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0444);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
@@ -941,7 +943,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
}
/* Allocate and fill driver data structure */
- info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
+ info = framebuffer_alloc(sizeof(struct arkfb_info), &(dev->dev));
if (! info) {
dev_err(&(dev->dev), "cannot allocate memory\n");
return -ENOMEM;
@@ -956,20 +958,20 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
/* Prepare PCI device */
rc = pci_enable_device(dev);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot enable PCI device\n");
+ dev_err(info->dev, "cannot enable PCI device\n");
goto err_enable_device;
}
rc = pci_request_regions(dev, "arkfb");
if (rc < 0) {
- dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+ dev_err(info->dev, "cannot reserve framebuffer region\n");
goto err_request_regions;
}
par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
if (! par->dac) {
rc = -ENOMEM;
- dev_err(&(dev->dev), "RAMDAC initialization failed\n");
+ dev_err(info->dev, "RAMDAC initialization failed\n");
goto err_dac;
}
@@ -980,7 +982,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
info->screen_base = pci_iomap(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
- dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+ dev_err(info->dev, "iomap for framebuffer failed\n");
goto err_iomap;
}
@@ -999,22 +1001,22 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
info->pseudo_palette = (void*) (par->pseudo_palette);
/* Prepare startup mode */
- rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+ rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
- dev_err(&(dev->dev), "mode %s not found\n", mode);
+ dev_err(info->dev, "mode %s not found\n", mode_option);
goto err_find_mode;
}
rc = fb_alloc_cmap(&info->cmap, 256, 0);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot allocate colormap\n");
+ dev_err(info->dev, "cannot allocate colormap\n");
goto err_alloc_cmap;
}
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot register framebugger\n");
+ dev_err(info->dev, "cannot register framebugger\n");
goto err_reg_fb;
}
@@ -1088,7 +1090,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
struct fb_info *info = pci_get_drvdata(dev);
struct arkfb_info *par = info->par;
- dev_info(&(dev->dev), "suspend\n");
+ dev_info(info->dev, "suspend\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -1119,7 +1121,7 @@ static int ark_pci_resume (struct pci_dev* dev)
struct fb_info *info = pci_get_drvdata(dev);
struct arkfb_info *par = info->par;
- dev_info(&(dev->dev), "resume\n");
+ dev_info(info->dev, "resume\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -1190,7 +1192,7 @@ static int __init arkfb_init(void)
return -ENODEV;
if (option && *option)
- mode = option;
+ mode_option = option;
#endif
pr_debug("arkfb: initializing\n");
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 5d4fbaa53a6c..dff35474b854 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -1270,7 +1270,7 @@ again:
gstart = (prescale / 2 + plen * left_margin) / prescale;
/* gend1 is for hde (gend-gstart multiple of align), shifter's xres */
- gend1 = gstart + ((xres + align - 1) / align) * align * plen / prescale;
+ gend1 = gstart + roundup(xres, align) * plen / prescale;
/* gend2 is for hbb, visible xres (rest to gend1 is cut off by hblank) */
gend2 = gstart + xres * plen / prescale;
par->HHT = plen * (left_margin + xres + right_margin) /
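The expression replaced in the hunk above is the standard
round-up-to-a-multiple idiom, which <linux/kernel.h> already provides:

	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	/* e.g. with align = 8: roundup(640, 8) == 640, roundup(641, 8) == 648,
	 * exactly ((xres + align - 1) / align) * align */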
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index fc65c02306dd..8ffdf3578768 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -31,7 +31,8 @@
#define ATMEL_LCDC_CVAL_DEFAULT 0xc8
#define ATMEL_LCDC_DMA_BURST_LEN 8
-#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9)
+#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
+ defined(CONFIG_ARCH_AT91SAM9RL)
#define ATMEL_LCDC_FIFO_SIZE 2048
#else
#define ATMEL_LCDC_FIFO_SIZE 512
@@ -250,6 +251,8 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
return -ENOMEM;
}
+ memset(info->screen_base, 0, info->fix.smem_len);
+
return 0;
}
@@ -336,19 +339,35 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
break;
case 15:
case 16:
- var->red.offset = 0;
+ if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ /* RGB:565 mode */
+ var->red.offset = 11;
+ var->blue.offset = 0;
+ var->green.length = 6;
+ } else {
+ /* BGR:555 mode */
+ var->red.offset = 0;
+ var->blue.offset = 10;
+ var->green.length = 5;
+ }
var->green.offset = 5;
- var->blue.offset = 10;
- var->red.length = var->green.length = var->blue.length = 5;
+ var->red.length = var->blue.length = 5;
break;
case 32:
var->transp.offset = 24;
var->transp.length = 8;
/* fall through */
case 24:
- var->red.offset = 0;
+ if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+ /* RGB:888 mode */
+ var->red.offset = 16;
+ var->blue.offset = 0;
+ } else {
+ /* BGR:888 mode */
+ var->red.offset = 0;
+ var->blue.offset = 16;
+ }
var->green.offset = 8;
- var->blue.offset = 16;
var->red.length = var->green.length = var->blue.length = 8;
break;
default:
@@ -634,7 +653,6 @@ static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
struct fb_info *info = sinfo->info;
int ret = 0;
- memset_io(info->screen_base, 0, info->fix.smem_len);
info->var.activate |= FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
dev_info(info->device,
@@ -696,6 +714,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
sinfo->guard_time = pdata_sinfo->guard_time;
sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
+ sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
@@ -764,6 +783,11 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
if (!info->screen_base)
goto release_intmem;
+
+ /*
+ * Don't clear the framebuffer -- someone may have set
+ * up a splash image.
+ */
} else {
/* allocate memory buffer */
ret = atmel_lcdfb_alloc_video_memory(sinfo);
@@ -903,10 +927,42 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+
+static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct atmel_lcdfb_info *sinfo = info->par;
+
+ sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
+ if (sinfo->atmel_lcdfb_power_control)
+ sinfo->atmel_lcdfb_power_control(0);
+ atmel_lcdfb_stop_clock(sinfo);
+ return 0;
+}
+
+static int atmel_lcdfb_resume(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct atmel_lcdfb_info *sinfo = info->par;
+
+ atmel_lcdfb_start_clock(sinfo);
+ if (sinfo->atmel_lcdfb_power_control)
+ sinfo->atmel_lcdfb_power_control(1);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
+ return 0;
+}
+
+#else
+#define atmel_lcdfb_suspend NULL
+#define atmel_lcdfb_resume NULL
+#endif
+
static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
-
-// FIXME need suspend, resume
+ .suspend = atmel_lcdfb_suspend,
+ .resume = atmel_lcdfb_resume,
.driver = {
.name = "atmel_lcdfb",
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index cbd3308b6690..24ee96c4e9e9 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -91,7 +91,7 @@
#undef DEBUG
#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __FUNCTION__, ##args);
+#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args);
#else
#define DBG(fmt, args...)
#endif
@@ -1885,7 +1885,7 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
/* range check to make sure */
if (ent->driver_data < ARRAY_SIZE(r128_family))
- strncat(video_card, r128_family[ent->driver_data], sizeof(video_card));
+ strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card));
printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 62f9c6e387cc..e4bcf5376a99 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2621,10 +2621,13 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
info->var = var;
- fb_alloc_cmap(&info->cmap, 256, 0);
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+ goto aty_init_exit;
- if (register_framebuffer(info) < 0)
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
goto aty_init_exit;
+ }
fb_list = info;
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c
index cc9e9779b75f..c50c7cf26fe9 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/aty/mach64_ct.c
@@ -197,7 +197,7 @@ static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
pll->dsp_config = (dsp_precision << 20) | (pll->dsp_loop_latency << 16) | dsp_xclks;
#ifdef DEBUG
printk("atyfb(%s): dsp_config 0x%08x, dsp_on_off 0x%08x\n",
- __FUNCTION__, pll->dsp_config, pll->dsp_on_off);
+ __func__, pll->dsp_config, pll->dsp_on_off);
#endif
return 0;
}
@@ -225,7 +225,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
(par->ref_clk_per * pll->pll_ref_div);
#ifdef DEBUG
printk("atyfb(%s): pllvclk=%d MHz, vclk=%d MHz\n",
- __FUNCTION__, pllvclk, pllvclk / pll->vclk_post_div_real);
+ __func__, pllvclk, pllvclk / pll->vclk_post_div_real);
#endif
pll->pll_vclk_cntl = 0x03; /* VCLK = PLL_VCLK/VCLKx_POST */
@@ -269,7 +269,7 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
}
#endif
#ifdef DEBUG
- printk("atyfb(%s): calculated 0x%08X(%i)\n", __FUNCTION__, ret, ret);
+ printk("atyfb(%s): calculated 0x%08X(%i)\n", __func__, ret, ret);
#endif
return ret;
}
@@ -284,11 +284,11 @@ void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
#ifdef DEBUG
printk("atyfb(%s): about to program:\n"
"pll_ext_cntl=0x%02x pll_gen_cntl=0x%02x pll_vclk_cntl=0x%02x\n",
- __FUNCTION__,
+ __func__,
pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl);
printk("atyfb(%s): setting clock %lu for FeedBackDivider %i, ReferenceDivider %i, PostDivider %i(%i)\n",
- __FUNCTION__,
+ __func__,
par->clk_wr_offset, pll->ct.vclk_fb_div,
pll->ct.pll_ref_div, pll->ct.vclk_post_div, pll->ct.vclk_post_div_real);
#endif
@@ -428,7 +428,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
#ifdef DEBUG
printk("atyfb(%s): mclk_fb_mult=%d, xclk_post_div=%d\n",
- __FUNCTION__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div);
+ __func__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div);
#endif
memcntl = aty_ld_le32(MEM_CNTL, par);
@@ -540,7 +540,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
pllmclk = (1000000 * pll->ct.mclk_fb_mult * pll->ct.mclk_fb_div) /
(par->ref_clk_per * pll->ct.pll_ref_div);
printk("atyfb(%s): pllmclk=%d MHz, xclk=%d MHz\n",
- __FUNCTION__, pllmclk, pllmclk / pll->ct.xclk_post_div_real);
+ __func__, pllmclk, pllmclk / pll->ct.xclk_post_div_real);
#endif
if (M64_HAS(SDRAM_MAGIC_PLL) && (par->ram_type >= SDRAM))
@@ -581,7 +581,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
pllsclk = (1000000 * 2 * pll->ct.sclk_fb_div) /
(par->ref_clk_per * pll->ct.pll_ref_div);
printk("atyfb(%s): use sclk, pllsclk=%d MHz, sclk=mclk=%d MHz\n",
- __FUNCTION__, pllsclk, pllsclk / sclk_post_div_real);
+ __func__, pllsclk, pllsclk / sclk_post_div_real);
#endif
}
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 62867cb63fef..72cd0d2f14ec 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -52,11 +52,14 @@
#define RADEON_VERSION "0.2.0"
+#include "radeonfb.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -91,7 +94,6 @@
#include "../edid.h" // MOVE THAT TO include/video
#include "ati_ids.h"
-#include "radeonfb.h"
#define MAX_MAPPED_VRAM (2048*2048*4)
#define MIN_MAPPED_VRAM (1024*768*1)
@@ -1488,7 +1490,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
freq = rinfo->pll.ppll_max;
if (freq*12 < rinfo->pll.ppll_min)
freq = rinfo->pll.ppll_min / 12;
- RTRACE("freq = %lu, PLL min = %u, PLL max = %u\n",
+ pr_debug("freq = %lu, PLL min = %u, PLL max = %u\n",
freq, rinfo->pll.ppll_min, rinfo->pll.ppll_max);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
@@ -1509,7 +1511,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
post_div = &post_divs[post_div->bitvalue];
pll_output_freq = post_div->divider * freq;
}
- RTRACE("ref_div = %d, ref_clk = %d, output_freq = %d\n",
+ pr_debug("ref_div = %d, ref_clk = %d, output_freq = %d\n",
rinfo->pll.ref_div, rinfo->pll.ref_clk,
pll_output_freq);
@@ -1519,7 +1521,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
post_div = &post_divs[post_div->bitvalue];
pll_output_freq = post_div->divider * freq;
}
- RTRACE("ref_div = %d, ref_clk = %d, output_freq = %d\n",
+ pr_debug("ref_div = %d, ref_clk = %d, output_freq = %d\n",
rinfo->pll.ref_div, rinfo->pll.ref_clk,
pll_output_freq);
@@ -1528,9 +1530,9 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
regs->ppll_ref_div = rinfo->pll.ref_div;
regs->ppll_div_3 = fb_div | (post_div->bitvalue << 16);
- RTRACE("post div = 0x%x\n", post_div->bitvalue);
- RTRACE("fb_div = 0x%x\n", fb_div);
- RTRACE("ppll_div_3 = 0x%x\n", regs->ppll_div_3);
+ pr_debug("post div = 0x%x\n", post_div->bitvalue);
+ pr_debug("fb_div = 0x%x\n", fb_div);
+ pr_debug("ppll_div_3 = 0x%x\n", regs->ppll_div_3);
}
static int radeonfb_set_par(struct fb_info *info)
@@ -1602,9 +1604,9 @@ static int radeonfb_set_par(struct fb_info *info)
dotClock = 1000000000 / pixClock;
freq = dotClock / 10; /* x100 */
- RTRACE("hStart = %d, hEnd = %d, hTotal = %d\n",
+ pr_debug("hStart = %d, hEnd = %d, hTotal = %d\n",
hSyncStart, hSyncEnd, hTotal);
- RTRACE("vStart = %d, vEnd = %d, vTotal = %d\n",
+ pr_debug("vStart = %d, vEnd = %d, vTotal = %d\n",
vSyncStart, vSyncEnd, vTotal);
hsync_wid = (hSyncEnd - hSyncStart) / 8;
@@ -1713,16 +1715,16 @@ static int radeonfb_set_par(struct fb_info *info)
newmode->surf_info[i] = 0;
}
- RTRACE("h_total_disp = 0x%x\t hsync_strt_wid = 0x%x\n",
+ pr_debug("h_total_disp = 0x%x\t hsync_strt_wid = 0x%x\n",
newmode->crtc_h_total_disp, newmode->crtc_h_sync_strt_wid);
- RTRACE("v_total_disp = 0x%x\t vsync_strt_wid = 0x%x\n",
+ pr_debug("v_total_disp = 0x%x\t vsync_strt_wid = 0x%x\n",
newmode->crtc_v_total_disp, newmode->crtc_v_sync_strt_wid);
rinfo->bpp = mode->bits_per_pixel;
rinfo->depth = depth;
- RTRACE("pixclock = %lu\n", (unsigned long)pixClock);
- RTRACE("freq = %lu\n", (unsigned long)freq);
+ pr_debug("pixclock = %lu\n", (unsigned long)pixClock);
+ pr_debug("freq = %lu\n", (unsigned long)freq);
/* We use PPLL_DIV_3 */
newmode->clk_cntl_index = 0x300;
@@ -1986,7 +1988,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
if (rinfo->has_CRTC2)
OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);
- RTRACE("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
+ pr_debug("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
aper_base,
((aper_base + aper_size - 1) & 0xffff0000) | (aper_base >> 16),
0xffff0000 | (agp_base >> 16));
@@ -2083,7 +2085,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
* ToDo: identify these cases
*/
- RTRACE("radeonfb (%s): Found %ldk of %s %d bits wide videoram\n",
+ pr_debug("radeonfb (%s): Found %ldk of %s %d bits wide videoram\n",
pci_name(rinfo->pdev),
rinfo->video_ram / 1024,
rinfo->vram_ddr ? "DDR" : "SDRAM",
@@ -2158,8 +2160,9 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
struct fb_info *info;
struct radeonfb_info *rinfo;
int ret;
+ unsigned char c1, c2;
- RTRACE("radeonfb_pci_register BEGIN\n");
+ pr_debug("radeonfb_pci_register BEGIN\n");
/* Enable device in PCI config */
ret = pci_enable_device(pdev);
@@ -2185,9 +2188,15 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
rinfo->lvds_timer.function = radeon_lvds_timer_func;
rinfo->lvds_timer.data = (unsigned long)rinfo;
- strcpy(rinfo->name, "ATI Radeon XX ");
- rinfo->name[11] = ent->device >> 8;
- rinfo->name[12] = ent->device & 0xFF;
+ c1 = ent->device >> 8;
+ c2 = ent->device & 0xff;
+ if (isprint(c1) && isprint(c2))
+ snprintf(rinfo->name, sizeof(rinfo->name),
+ "ATI Radeon %x \"%c%c\"", ent->device & 0xffff, c1, c2);
+ else
+ snprintf(rinfo->name, sizeof(rinfo->name),
+ "ATI Radeon %x", ent->device & 0xffff);
+
rinfo->family = ent->driver_data & CHIP_FAMILY_MASK;
rinfo->chipset = pdev->device;
rinfo->has_CRTC2 = (ent->driver_data & CHIP_HAS_CRTC2) != 0;
@@ -2278,7 +2287,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
goto err_unmap_rom;
}
- RTRACE("radeonfb (%s): mapped %ldk videoram\n", pci_name(rinfo->pdev),
+ pr_debug("radeonfb (%s): mapped %ldk videoram\n", pci_name(rinfo->pdev),
rinfo->mapped_vram/1024);
/*
@@ -2373,7 +2382,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
if (rinfo->bios_seg)
radeon_unmap_ROM(rinfo, pdev);
- RTRACE("radeonfb_pci_register END\n");
+ pr_debug("radeonfb_pci_register END\n");
return 0;
err_unmap_fb:
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 7db9de681716..f9e7c29ad9bf 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -1,3 +1,5 @@
+#include "radeonfb.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -11,7 +13,6 @@
#include <asm/io.h>
#include <video/radeon.h>
-#include "radeonfb.h"
#include "../edid.h"
static void radeon_gpio_setscl(void* data, int state)
@@ -77,7 +78,7 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
chan->algo.setscl = radeon_gpio_setscl;
chan->algo.getsda = radeon_gpio_getsda;
chan->algo.getscl = radeon_gpio_getscl;
- chan->algo.udelay = 40;
+ chan->algo.udelay = 10;
chan->algo.timeout = 20;
chan->algo.data = chan;
@@ -148,21 +149,21 @@ int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn,
if (out_edid)
*out_edid = edid;
if (!edid) {
- RTRACE("radeonfb: I2C (port %d) ... not found\n", conn);
+ pr_debug("radeonfb: I2C (port %d) ... not found\n", conn);
return MT_NONE;
}
if (edid[0x14] & 0x80) {
/* Fix detection using BIOS tables */
if (rinfo->is_mobility /*&& conn == ddc_dvi*/ &&
(INREG(LVDS_GEN_CNTL) & LVDS_ON)) {
- RTRACE("radeonfb: I2C (port %d) ... found LVDS panel\n", conn);
+ pr_debug("radeonfb: I2C (port %d) ... found LVDS panel\n", conn);
return MT_LCD;
} else {
- RTRACE("radeonfb: I2C (port %d) ... found TMDS panel\n", conn);
+ pr_debug("radeonfb: I2C (port %d) ... found TMDS panel\n", conn);
return MT_DFP;
}
}
- RTRACE("radeonfb: I2C (port %d) ... found CRT display\n", conn);
+ pr_debug("radeonfb: I2C (port %d) ... found CRT display\n", conn);
return MT_CRT;
}
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 2030ed813429..b4d4b88afc09 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -69,11 +69,11 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
u8 *tmp;
int i, mt = MT_NONE;
- RTRACE("analyzing OF properties...\n");
+ pr_debug("analyzing OF properties...\n");
pmt = of_get_property(dp, "display-type", NULL);
if (!pmt)
return MT_NONE;
- RTRACE("display-type: %s\n", pmt);
+ pr_debug("display-type: %s\n", pmt);
/* OF says "LCD" for DFP as well, we discriminate from the caller of this
* function
*/
@@ -117,7 +117,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
{
struct device_node *dp;
- RTRACE("radeon_probe_OF_head\n");
+ pr_debug("radeon_probe_OF_head\n");
dp = rinfo->of_node;
while (dp == NULL)
@@ -135,7 +135,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
if (!pname)
return MT_NONE;
len = strlen(pname);
- RTRACE("head: %s (letter: %c, head_no: %d)\n",
+ pr_debug("head: %s (letter: %c, head_no: %d)\n",
pname, pname[len-1], head_no);
if (pname[len-1] == 'A' && head_no == 0) {
int mt = radeon_parse_montype_prop(dp, out_EDID, 0);
@@ -185,7 +185,7 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
rinfo->panel_info.xres, rinfo->panel_info.yres);
rinfo->panel_info.pwr_delay = BIOS_IN16(tmp + 44);
- RTRACE("BIOS provided panel power delay: %d\n", rinfo->panel_info.pwr_delay);
+ pr_debug("BIOS provided panel power delay: %d\n", rinfo->panel_info.pwr_delay);
if (rinfo->panel_info.pwr_delay > 2000 || rinfo->panel_info.pwr_delay <= 0)
rinfo->panel_info.pwr_delay = 2000;
@@ -199,16 +199,16 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
rinfo->panel_info.fbk_divider > 3) {
rinfo->panel_info.use_bios_dividers = 1;
printk(KERN_INFO "radeondb: BIOS provided dividers will be used\n");
- RTRACE("ref_divider = %x\n", rinfo->panel_info.ref_divider);
- RTRACE("post_divider = %x\n", rinfo->panel_info.post_divider);
- RTRACE("fbk_divider = %x\n", rinfo->panel_info.fbk_divider);
+ pr_debug("ref_divider = %x\n", rinfo->panel_info.ref_divider);
+ pr_debug("post_divider = %x\n", rinfo->panel_info.post_divider);
+ pr_debug("fbk_divider = %x\n", rinfo->panel_info.fbk_divider);
}
- RTRACE("Scanning BIOS table ...\n");
+ pr_debug("Scanning BIOS table ...\n");
for(i=0; i<32; i++) {
tmp0 = BIOS_IN16(tmp+64+i*2);
if (tmp0 == 0)
break;
- RTRACE(" %d x %d\n", BIOS_IN16(tmp0), BIOS_IN16(tmp0+2));
+ pr_debug(" %d x %d\n", BIOS_IN16(tmp0), BIOS_IN16(tmp0+2));
if ((BIOS_IN16(tmp0) == rinfo->panel_info.xres) &&
(BIOS_IN16(tmp0+2) == rinfo->panel_info.yres)) {
rinfo->panel_info.hblank = (BIOS_IN16(tmp0+17) - BIOS_IN16(tmp0+19)) * 8;
@@ -227,19 +227,19 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
/* Mark panel infos valid */
rinfo->panel_info.valid = 1;
- RTRACE("Found panel in BIOS table:\n");
- RTRACE(" hblank: %d\n", rinfo->panel_info.hblank);
- RTRACE(" hOver_plus: %d\n", rinfo->panel_info.hOver_plus);
- RTRACE(" hSync_width: %d\n", rinfo->panel_info.hSync_width);
- RTRACE(" vblank: %d\n", rinfo->panel_info.vblank);
- RTRACE(" vOver_plus: %d\n", rinfo->panel_info.vOver_plus);
- RTRACE(" vSync_width: %d\n", rinfo->panel_info.vSync_width);
- RTRACE(" clock: %d\n", rinfo->panel_info.clock);
+ pr_debug("Found panel in BIOS table:\n");
+ pr_debug(" hblank: %d\n", rinfo->panel_info.hblank);
+ pr_debug(" hOver_plus: %d\n", rinfo->panel_info.hOver_plus);
+ pr_debug(" hSync_width: %d\n", rinfo->panel_info.hSync_width);
+ pr_debug(" vblank: %d\n", rinfo->panel_info.vblank);
+ pr_debug(" vOver_plus: %d\n", rinfo->panel_info.vOver_plus);
+ pr_debug(" vSync_width: %d\n", rinfo->panel_info.vSync_width);
+ pr_debug(" clock: %d\n", rinfo->panel_info.clock);
return 1;
}
}
- RTRACE("Didn't find panel in BIOS table !\n");
+ pr_debug("Didn't find panel in BIOS table !\n");
return 0;
}
@@ -271,18 +271,18 @@ static void __devinit radeon_parse_connector_info(struct radeonfb_info *rinfo)
* DEBUG is enabled
*/
chips = BIOS_IN8(offset++) >> 4;
- RTRACE("%d chips in connector info\n", chips);
+ pr_debug("%d chips in connector info\n", chips);
for (i = 0; i < chips; i++) {
tmp = BIOS_IN8(offset++);
connectors = tmp & 0x0f;
- RTRACE(" - chip %d has %d connectors\n", tmp >> 4, connectors);
+ pr_debug(" - chip %d has %d connectors\n", tmp >> 4, connectors);
for (conn = 0; ; conn++) {
tmp = BIOS_IN16(offset);
if (tmp == 0)
break;
offset += 2;
type = (tmp >> 12) & 0x0f;
- RTRACE(" * connector %d of type %d (%s) : %04x\n",
+ pr_debug(" * connector %d of type %d (%s) : %04x\n",
conn, type, __conn_type_table[type], tmp);
}
}
@@ -449,7 +449,7 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
* a layout for each card ?
*/
- RTRACE("Using specified monitor layout: %s", monitor_layout);
+ pr_debug("Using specified monitor layout: %s", monitor_layout);
#ifdef CONFIG_FB_RADEON_I2C
if (!ignore_edid) {
if (rinfo->mon1_type != MT_NONE)
@@ -479,9 +479,9 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
* Auto-detecting display type (well... trying to ...)
*/
- RTRACE("Starting monitor auto detection...\n");
+ pr_debug("Starting monitor auto detection...\n");
-#if DEBUG && defined(CONFIG_FB_RADEON_I2C)
+#if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
{
u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
@@ -756,7 +756,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
if (!rinfo->panel_info.use_bios_dividers && rinfo->mon1_type != MT_CRT
&& rinfo->mon1_EDID) {
struct fb_var_screeninfo var;
- RTRACE("Parsing EDID data for panel info\n");
+ pr_debug("Parsing EDID data for panel info\n");
if (fb_parse_edid(rinfo->mon1_EDID, &var) == 0) {
if (var.xres >= rinfo->panel_info.xres &&
var.yres >= rinfo->panel_info.yres)
@@ -776,7 +776,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
if (rinfo->mon1_type != MT_CRT && rinfo->panel_info.valid) {
struct fb_var_screeninfo *var = &info->var;
- RTRACE("Setting up default mode based on panel info\n");
+ pr_debug("Setting up default mode based on panel info\n");
var->xres = rinfo->panel_info.xres;
var->yres = rinfo->panel_info.yres;
var->xres_virtual = rinfo->panel_info.xres;
@@ -824,7 +824,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
int dbsize;
char modename[32];
- RTRACE("Guessing panel info...\n");
+ pr_debug("Guessing panel info...\n");
if (rinfo->panel_info.xres == 0 || rinfo->panel_info.yres == 0) {
u32 tmp = INREG(FP_HORZ_STRETCH) & HORZ_PANEL_SIZE;
rinfo->panel_info.xres = ((tmp >> HORZ_PANEL_SHIFT) + 1) * 8;
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 5eac1ce52e72..c347e38cd0b0 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -1,6 +1,10 @@
#ifndef __RADEONFB_H__
#define __RADEONFB_H__
+#ifdef CONFIG_FB_RADEON_DEBUG
+#define DEBUG 1
+#endif
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -365,22 +369,6 @@ struct radeonfb_info {
/*
- * Debugging stuffs
- */
-#ifdef CONFIG_FB_RADEON_DEBUG
-#define DEBUG 1
-#else
-#define DEBUG 0
-#endif
-
-#if DEBUG
-#define RTRACE printk
-#else
-#define RTRACE if(0) printk
-#endif
-
-
-/*
* IO macros
*/
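This is also why radeon_base.c and radeon_i2c.c now include "radeonfb.h"
before the system headers: <linux/kernel.h> chooses the pr_debug()
definition at preprocessing time, based on whether DEBUG is already
defined. Schematically, the logic this relies on is:

	#ifdef DEBUG
	#define pr_debug(fmt, arg...) \
		printk(KERN_DEBUG fmt, ##arg)
	#else
	static inline int pr_debug(const char *fmt, ...)
	{
		return 0;
	}
	#endif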
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index eefba3d0e4b9..49834a67a623 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -336,7 +336,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
{
if (var->bits_per_pixel != LCD_BPP) {
- pr_debug("%s: depth not supported: %u BPP\n", __FUNCTION__,
+ pr_debug("%s: depth not supported: %u BPP\n", __func__,
var->bits_per_pixel);
return -EINVAL;
}
@@ -345,7 +345,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
info->var.xres_virtual != var->xres_virtual ||
info->var.yres_virtual != var->yres_virtual) {
pr_debug("%s: Resolution not supported: X%u x Y%u \n",
- __FUNCTION__, var->xres, var->yres);
+ __func__, var->xres, var->yres);
return -EINVAL;
}
@@ -355,7 +355,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
- __FUNCTION__, var->yres_virtual);
+ __func__, var->yres_virtual);
return -ENOMEM;
}
@@ -652,7 +652,7 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
goto out7;
}
- if (request_irq(info->irq, (void *)bfin_bf54x_irq_error, IRQF_DISABLED,
+ if (request_irq(info->irq, bfin_bf54x_irq_error, IRQF_DISABLED,
"PPI ERROR", info) < 0) {
printk(KERN_ERR DRIVER_NAME
": unable to request PPI ERROR IRQ\n");
diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
index b07e419b12d2..df03f3776dcc 100644
--- a/drivers/video/cfbcopyarea.c
+++ b/drivers/video/cfbcopyarea.c
@@ -44,15 +44,16 @@
*/
static void
-bitcpy(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src,
- int src_idx, int bits, unsigned n, u32 bswapmask)
+bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ const unsigned long __iomem *src, int src_idx, int bits,
+ unsigned n, u32 bswapmask)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
int left, right;
- first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
if (!shift) {
// Same alignment for source and dest
@@ -202,8 +203,9 @@ bitcpy(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src
*/
static void
-bitcpy_rev(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src,
- int src_idx, int bits, unsigned n, u32 bswapmask)
+bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ const unsigned long __iomem *src, int src_idx, int bits,
+ unsigned n, u32 bswapmask)
{
unsigned long first, last;
int shift;
@@ -221,8 +223,9 @@ bitcpy_rev(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem
shift = dst_idx-src_idx;
- first = fb_shifted_pixels_mask_long(bits - 1 - dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long(bits - 1 - ((dst_idx-n) % bits), bswapmask);
+ first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
+ bswapmask);
if (!shift) {
// Same alignment for source and dest
@@ -404,7 +407,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
- bitcpy_rev(dst, dst_idx, src, src_idx, bits,
+ bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel, bswapmask);
}
} else {
@@ -413,7 +416,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
- bitcpy(dst, dst_idx, src, src_idx, bits,
+ bitcpy(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel, bswapmask);
dst_idx += bits_per_line;
src_idx += bits_per_line;
diff --git a/drivers/video/cfbfillrect.c b/drivers/video/cfbfillrect.c
index 23d70a12e4da..64b35766b2a2 100644
--- a/drivers/video/cfbfillrect.c
+++ b/drivers/video/cfbfillrect.c
@@ -36,16 +36,16 @@
*/
static void
-bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
- unsigned n, int bits, u32 bswapmask)
+bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long pat, unsigned n, int bits, u32 bswapmask)
{
unsigned long first, last;
if (!n)
return;
- first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
if (dst_idx+n <= bits) {
// Single word
@@ -93,16 +93,16 @@ bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
*/
static void
-bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
- int left, int right, unsigned n, int bits)
+bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long pat, int left, int right, unsigned n, int bits)
{
unsigned long first, last;
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
// Single word
@@ -147,8 +147,9 @@ bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
* Aligned pattern invert using 32/64-bit memory accesses
*/
static void
-bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
- unsigned n, int bits, u32 bswapmask)
+bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
+ int dst_idx, unsigned long pat, unsigned n, int bits,
+ u32 bswapmask)
{
unsigned long val = pat, dat;
unsigned long first, last;
@@ -156,8 +157,8 @@ bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
if (!n)
return;
- first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
- last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
if (dst_idx+n <= bits) {
// Single word
@@ -217,16 +218,17 @@ bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
*/
static void
-bitfill_unaligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
- int left, int right, unsigned n, int bits)
+bitfill_unaligned_rev(struct fb_info *p, unsigned long __iomem *dst,
+ int dst_idx, unsigned long pat, int left, int right,
+ unsigned n, int bits)
{
unsigned long first, last, dat;
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
// Single word
@@ -306,7 +308,8 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
p->fbops->fb_sync(p);
if (!left) {
u32 bswapmask = fb_compute_bswapmask(p);
- void (*fill_op32)(unsigned long __iomem *dst, int dst_idx,
+ void (*fill_op32)(struct fb_info *p,
+ unsigned long __iomem *dst, int dst_idx,
unsigned long pat, unsigned n, int bits,
u32 bswapmask) = NULL;
@@ -325,16 +328,17 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bits - 1);
- fill_op32(dst, dst_idx, pat, width*bpp, bits, bswapmask);
+ fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
+ bswapmask);
dst_idx += p->fix.line_length*8;
}
} else {
int right;
int r;
int rot = (left-dst_idx) % bpp;
- void (*fill_op)(unsigned long __iomem *dst, int dst_idx,
- unsigned long pat, int left, int right,
- unsigned n, int bits) = NULL;
+ void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
+ int dst_idx, unsigned long pat, int left,
+ int right, unsigned n, int bits) = NULL;
/* rotate pattern to correct start position */
pat = pat << rot | pat >> (bpp-rot);
@@ -355,7 +359,7 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bits - 1);
- fill_op(dst, dst_idx, pat, left, right,
+ fill_op(p, dst, dst_idx, pat, left, right,
width*bpp, bits);
r = (p->fix.line_length*8) % bpp;
pat = pat << (bpp-r) | pat >> r;
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index f598907b42ad..baed57d3cfff 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -38,35 +38,31 @@
#define DEBUG
#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__FUNCTION__,## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
#else
#define DPRINTK(fmt, args...)
#endif
-static const u32 cfb_tab8[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab8_be[] = {
0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab8_le[] = {
0x00000000,0xff000000,0x00ff0000,0xffff0000,
0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
};
-static const u32 cfb_tab16[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab16_be[] = {
0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab16_le[] = {
0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
};
static const u32 cfb_tab32[] = {
@@ -98,7 +94,8 @@ static inline void color_imageblit(const struct fb_image *image,
val = 0;
if (start_index) {
- u32 start_mask = ~fb_shifted_pixels_mask_u32(start_index, bswapmask);
+ u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
+ start_index, bswapmask);
val = FB_READL(dst) & start_mask;
shift = start_index;
}
@@ -108,20 +105,21 @@ static inline void color_imageblit(const struct fb_image *image,
color = palette[*src];
else
color = *src;
- color <<= FB_LEFT_POS(bpp);
- val |= FB_SHIFT_HIGH(color, shift ^ bswapmask);
+ color <<= FB_LEFT_POS(p, bpp);
+ val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
if (shift >= null_bits) {
FB_WRITEL(val, dst++);
val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(color, 32 - shift);
+ FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
src++;
}
if (shift) {
- u32 end_mask = fb_shifted_pixels_mask_u32(shift, bswapmask);
+ u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
+ bswapmask);
FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
}
@@ -152,8 +150,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
u32 bswapmask = fb_compute_bswapmask(p);
dst2 = (u32 __iomem *) dst1;
- fgcolor <<= FB_LEFT_POS(bpp);
- bgcolor <<= FB_LEFT_POS(bpp);
+ fgcolor <<= FB_LEFT_POS(p, bpp);
+ bgcolor <<= FB_LEFT_POS(p, bpp);
for (i = image->height; i--; ) {
shift = val = 0;
@@ -164,7 +162,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
/* write leading bits */
if (start_index) {
- u32 start_mask = ~fb_shifted_pixels_mask_u32(start_index, bswapmask);
+ u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
+ start_index, bswapmask);
val = FB_READL(dst) & start_mask;
shift = start_index;
}
@@ -172,13 +171,13 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
while (j--) {
l--;
color = (*s & (1 << l)) ? fgcolor : bgcolor;
- val |= FB_SHIFT_HIGH(color, shift ^ bswapmask);
+ val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
/* Did the bitshift spill bits to the next long? */
if (shift >= null_bits) {
FB_WRITEL(val, dst++);
val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(color,32 - shift);
+ FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
@@ -187,7 +186,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
/* write trailing bits */
if (shift) {
- u32 end_mask = fb_shifted_pixels_mask_u32(shift, bswapmask);
+ u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
+ bswapmask);
FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
}
@@ -223,13 +223,13 @@ static inline void fast_imageblit(const struct fb_image *image, struct fb_info *
u32 __iomem *dst;
const u32 *tab = NULL;
int i, j, k;
-
+
switch (bpp) {
case 8:
- tab = cfb_tab8;
+ tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
break;
case 16:
- tab = cfb_tab16;
+ tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
break;
case 32:
default:
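Splitting the tables and selecting one at run time replaces the old
compile-time #ifdef __BIG_ENDIAN choice, so a single kernel can drive
framebuffers of either byte order. fb_be_math() is roughly the following (a
sketch of the <linux/fb.h> helper this series adds; it collapses to a
constant unless foreign-endian support is configured):

	#ifdef CONFIG_FB_FOREIGN_ENDIAN
	#define fb_be_math(fb) ((fb)->flags & FBINFO_BE_MATH)
	#elif defined(__BIG_ENDIAN)
	#define fb_be_math(fb) (1)
	#else
	#define fb_be_math(fb) (0)
	#endif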
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index f7e2d5add831..35ac9d956b3d 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -81,7 +81,7 @@
/* debug output */
#ifdef CIRRUSFB_DEBUG
#define DPRINTK(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+ printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif
@@ -91,7 +91,7 @@
#define assert(expr) \
if (!(expr)) { \
printk("Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __FUNCTION__, __LINE__); \
+ #expr, __FILE__, __func__, __LINE__); \
}
#else
#define assert(expr)
@@ -3117,7 +3117,7 @@ static void bestclock(long freq, long *best, long *nom,
}
}
}
- d = ((143181 * n) + f - 1) / f;
+ d = DIV_ROUND_UP(143181 * n, f);
if ((d >= 7) && (d <= 63)) {
if (d > 31)
d = (d / 2) * 2;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 022282494d3f..8eda7b60df8f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -92,7 +92,7 @@
#include "fbcon.h"
#ifdef FBCONDEBUG
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
# define DPRINTK(fmt, args...)
#endif
@@ -620,8 +620,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (fb_get_color_depth(&info->var, &info->fix) == 1)
erase &= ~0x400;
logo_height = fb_prepare_logo(info, ops->rotate);
- logo_lines = (logo_height + vc->vc_font.height - 1) /
- vc->vc_font.height;
+ logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height);
q = (unsigned short *) (vc->vc_origin +
vc->vc_size_row * rows);
step = logo_lines * cols;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 3706307e70ed..0135e0395456 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -104,10 +104,14 @@ struct fbcon_ops {
#define attr_blink(s) \
((s) & 0x8000)
-#define mono_col(info) \
- (~(0xfff << (max((info)->var.green.length, \
- max((info)->var.red.length, \
- (info)->var.blue.length)))) & 0xff)
+
+static inline int mono_col(const struct fb_info *info)
+{
+ __u32 max_len;
+ max_len = max(info->var.green.length, info->var.red.length);
+ max_len = max(info->var.blue.length, max_len);
+ return (~(0xfff << max_len)) & 0xff;
+}
static inline int attr_col_ec(int shift, struct vc_data *vc,
struct fb_info *info, int is_fg)
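A worked example of the rewritten helper: for an RGB565 visual (red 5,
green 6, blue 5 bits), max_len is 6, and

	/*
	 * 0xfff << 6       = 0x3ffc0
	 * ~0x3ffc0 & 0xff  = 0x3f
	 */

so mono_col() returns 0x3f, the full-scale value of the widest color
component, matching the old macro.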
diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h
index a2a0618d86a5..1db622192bde 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fb_draw.h
@@ -94,41 +94,44 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
return val;
}
-static inline u32 fb_shifted_pixels_mask_u32(u32 index, u32 bswapmask)
+static inline u32 fb_shifted_pixels_mask_u32(struct fb_info *p, u32 index,
+ u32 bswapmask)
{
u32 mask;
if (!bswapmask) {
- mask = FB_SHIFT_HIGH(~(u32)0, index);
+ mask = FB_SHIFT_HIGH(p, ~(u32)0, index);
} else {
- mask = 0xff << FB_LEFT_POS(8);
- mask = FB_SHIFT_LOW(mask, index & (bswapmask)) & mask;
- mask = FB_SHIFT_HIGH(mask, index & ~(bswapmask));
+ mask = 0xff << FB_LEFT_POS(p, 8);
+ mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
+ mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
#if defined(__i386__) || defined(__x86_64__)
/* Shift argument is limited to 0 - 31 on x86 based CPU's */
if(index + bswapmask < 32)
#endif
- mask |= FB_SHIFT_HIGH(~(u32)0,
+ mask |= FB_SHIFT_HIGH(p, ~(u32)0,
(index + bswapmask) & ~(bswapmask));
}
return mask;
}
-static inline unsigned long fb_shifted_pixels_mask_long(u32 index, u32 bswapmask)
+static inline unsigned long fb_shifted_pixels_mask_long(struct fb_info *p,
+ u32 index,
+ u32 bswapmask)
{
unsigned long mask;
if (!bswapmask) {
- mask = FB_SHIFT_HIGH(~0UL, index);
+ mask = FB_SHIFT_HIGH(p, ~0UL, index);
} else {
- mask = 0xff << FB_LEFT_POS(8);
- mask = FB_SHIFT_LOW(mask, index & (bswapmask)) & mask;
- mask = FB_SHIFT_HIGH(mask, index & ~(bswapmask));
+ mask = 0xff << FB_LEFT_POS(p, 8);
+ mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
+ mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
#if defined(__i386__) || defined(__x86_64__)
/* Shift argument is limited to 0 - 31 on x86 based CPU's */
if(index + bswapmask < BITS_PER_LONG)
#endif
- mask |= FB_SHIFT_HIGH(~0UL,
+ mask |= FB_SHIFT_HIGH(p, ~0UL,
(index + bswapmask) & ~(bswapmask));
}
return mask;
@@ -158,8 +161,8 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
return val;
}
-#define fb_shifted_pixels_mask_u32(i, b) FB_SHIFT_HIGH(~(u32)0, (i))
-#define fb_shifted_pixels_mask_long(i, b) FB_SHIFT_HIGH(~0UL, (i))
+#define fb_shifted_pixels_mask_u32(p, i, b) FB_SHIFT_HIGH((p), ~(u32)0, (i))
+#define fb_shifted_pixels_mask_long(p, i, b) FB_SHIFT_HIGH((p), ~0UL, (i))
#define fb_compute_bswapmask(...) 0
#endif /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 01072f4b3e8f..776f7fcd2fbf 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/linux_logo.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/console.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
@@ -632,27 +633,51 @@ int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
#endif /* CONFIG_LOGO */
-static int fbmem_read_proc(char *buf, char **start, off_t offset,
- int len, int *eof, void *private)
+static void *fb_seq_start(struct seq_file *m, loff_t *pos)
{
- struct fb_info **fi;
- int clen;
-
- clen = 0;
- for (fi = registered_fb; fi < &registered_fb[FB_MAX] && clen < 4000;
- fi++)
- if (*fi)
- clen += sprintf(buf + clen, "%d %s\n",
- (*fi)->node,
- (*fi)->fix.id);
- *start = buf + offset;
- if (clen > offset)
- clen -= offset;
- else
- clen = 0;
- return clen < len ? clen : len;
+ return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static void fb_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int fb_seq_show(struct seq_file *m, void *v)
+{
+ int i = *(loff_t *)v;
+ struct fb_info *fi = registered_fb[i];
+
+ if (fi)
+ seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
+ return 0;
+}
+
+static const struct seq_operations proc_fb_seq_ops = {
+ .start = fb_seq_start,
+ .next = fb_seq_next,
+ .stop = fb_seq_stop,
+ .show = fb_seq_show,
+};
+
+static int proc_fb_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &proc_fb_seq_ops);
}
+static const struct file_operations fb_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = proc_fb_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static ssize_t
fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
@@ -1057,7 +1082,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
case FBIOPUT_CON2FBMAP:
if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return - EFAULT;
- if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
return -EINVAL;
@@ -1352,6 +1377,32 @@ static const struct file_operations fb_fops = {
struct class *fb_class;
EXPORT_SYMBOL(fb_class);
+
+static int fb_check_foreignness(struct fb_info *fi)
+{
+ const bool foreign_endian = fi->flags & FBINFO_FOREIGN_ENDIAN;
+
+ fi->flags &= ~FBINFO_FOREIGN_ENDIAN;
+
+#ifdef __BIG_ENDIAN
+ fi->flags |= foreign_endian ? 0 : FBINFO_BE_MATH;
+#else
+ fi->flags |= foreign_endian ? FBINFO_BE_MATH : 0;
+#endif /* __BIG_ENDIAN */
+
+ if (fi->flags & FBINFO_BE_MATH && !fb_be_math(fi)) {
+ pr_err("%s: enable CONFIG_FB_BIG_ENDIAN to "
+ "support this framebuffer\n", fi->fix.id);
+ return -ENOSYS;
+ } else if (!(fi->flags & FBINFO_BE_MATH) && fb_be_math(fi)) {
+ pr_err("%s: enable CONFIG_FB_LITTLE_ENDIAN to "
+ "support this framebuffer\n", fi->fix.id);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
@@ -1371,6 +1422,10 @@ register_framebuffer(struct fb_info *fb_info)
if (num_registered_fb == FB_MAX)
return -ENXIO;
+
+ if (fb_check_foreignness(fb_info))
+ return -ENOSYS;
+
num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
@@ -1503,7 +1558,7 @@ void fb_set_suspend(struct fb_info *info, int state)
static int __init
fbmem_init(void)
{
- create_proc_read_entry("fb", 0, NULL, fbmem_read_proc, NULL);
+ proc_create("fb", 0, NULL, &fb_proc_fops);
if (register_chrdev(FB_MAJOR,"fb",&fb_fops))
printk("unable to get major %d for fb devs\n", FB_MAJOR);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
new file mode 100644
index 000000000000..b50bb03cb5ab
--- /dev/null
+++ b/drivers/video/fsl-diu-fb.c
@@ -0,0 +1,1721 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Freescale DIU Frame Buffer device driver
+ *
+ * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ * Paul Widmer <paul.widmer@freescale.com>
+ * Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
+ * York Sun <yorksun@freescale.com>
+ *
+ * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <linux/of_platform.h>
+
+#include <sysdev/fsl_soc.h>
+#include "fsl-diu-fb.h"
+
+/*
+ * Default timing parameters for 1024x768 video output
+ * (hsync 31.5 kHz, vsync 60 Hz).
+ * FIXME - change timing to proper amounts
+ */
+static struct fb_videomode __devinitdata fsl_diu_default_mode = {
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15385,
+ .left_margin = 160,
+ .right_margin = 24,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .hsync_len = 136,
+ .vsync_len = 6,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+};
+
+static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
+ {
+ .name = "1024x768-60",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15385,
+ .left_margin = 160,
+ .right_margin = 24,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .hsync_len = 136,
+ .vsync_len = 6,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1024x768-70",
+ .refresh = 70,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 16886,
+ .left_margin = 3,
+ .right_margin = 3,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 40,
+ .vsync_len = 18,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1024x768-75",
+ .refresh = 75,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15009,
+ .left_margin = 3,
+ .right_margin = 3,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 80,
+ .vsync_len = 32,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1280x1024-60",
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 1024,
+ .pixclock = 9375,
+ .left_margin = 38,
+ .right_margin = 128,
+ .upper_margin = 2,
+ .lower_margin = 7,
+ .hsync_len = 216,
+ .vsync_len = 37,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1280x1024-70",
+ .refresh = 70,
+ .xres = 1280,
+ .yres = 1024,
+ .pixclock = 9380,
+ .left_margin = 6,
+ .right_margin = 6,
+ .upper_margin = 4,
+ .lower_margin = 4,
+ .hsync_len = 60,
+ .vsync_len = 94,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1280x1024-75",
+ .refresh = 75,
+ .xres = 1280,
+ .yres = 1024,
+ .pixclock = 9380,
+ .left_margin = 6,
+ .right_margin = 6,
+ .upper_margin = 4,
+ .lower_margin = 4,
+ .hsync_len = 60,
+ .vsync_len = 15,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "320x240", /* for AOI only */
+ .refresh = 60,
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 15385,
+ .left_margin = 0,
+ .right_margin = 0,
+ .upper_margin = 0,
+ .lower_margin = 0,
+ .hsync_len = 0,
+ .vsync_len = 0,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ .name = "1280x480-60",
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 480,
+ .pixclock = 18939,
+ .left_margin = 353,
+ .right_margin = 47,
+ .upper_margin = 39,
+ .lower_margin = 4,
+ .hsync_len = 8,
+ .vsync_len = 2,
+ .sync = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+};
+
+static char *fb_mode = "1024x768-32@60";
+static unsigned long default_bpp = 32;
+static int monitor_port;
+
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+static u8 *coherence_data;
+static size_t coherence_data_size;
+static unsigned int d_cache_line_size;
+#endif
+
+static DEFINE_SPINLOCK(diu_lock);
+
+struct fsl_diu_data {
+ struct fb_info *fsl_diu_info[FSL_AOI_NUM - 1];
+	/* FSL_AOI_NUM includes one dummy AOI */
+ struct device_attribute dev_attr;
+ struct diu_ad *dummy_ad;
+ void *dummy_aoi_virt;
+ unsigned int irq;
+ int fb_enabled;
+ int monitor_port;
+};
+
+struct mfb_info {
+ int index;
+ int type;
+ char *id;
+ int registered;
+ int blank;
+ unsigned long pseudo_palette[16];
+ struct diu_ad *ad;
+ int cursor_reset;
+ unsigned char g_alpha;
+ unsigned int count;
+ int x_aoi_d; /* aoi display x offset to physical screen */
+ int y_aoi_d; /* aoi display y offset to physical screen */
+ struct fsl_diu_data *parent;
+};
+
+
+static struct mfb_info mfb_template[] = {
+ { /* AOI 0 for plane 0 */
+ .index = 0,
+ .type = MFB_TYPE_OUTPUT,
+ .id = "Panel0",
+ .registered = 0,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 0,
+ },
+ { /* AOI 0 for plane 1 */
+ .index = 1,
+ .type = MFB_TYPE_OUTPUT,
+ .id = "Panel1 AOI0",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 0,
+ },
+ { /* AOI 1 for plane 1 */
+ .index = 2,
+ .type = MFB_TYPE_OUTPUT,
+ .id = "Panel1 AOI1",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 0,
+ .y_aoi_d = 480,
+ },
+ { /* AOI 0 for plane 2 */
+ .index = 3,
+ .type = MFB_TYPE_OUTPUT,
+ .id = "Panel2 AOI0",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 640,
+ .y_aoi_d = 0,
+ },
+ { /* AOI 1 for plane 2 */
+ .index = 4,
+ .type = MFB_TYPE_OUTPUT,
+ .id = "Panel2 AOI1",
+ .registered = 0,
+ .g_alpha = 0xff,
+ .count = 0,
+ .x_aoi_d = 640,
+ .y_aoi_d = 480,
+ },
+};
+
+static struct diu_hw dr = {
+ .mode = MFB_MODE1,
+ .reg_lock = __SPIN_LOCK_UNLOCKED(diu_hw.reg_lock),
+};
+
+static struct diu_pool pool;
+
+/* Allocate memory for the framebuffer.  Try __get_free_pages() first; if
+ * that fails, fall back to rh_alloc().  __get_free_pages() cannot allocate
+ * very large buffers (more than 4MB), but we avoid using the rheap for
+ * small allocations, since frequent small allocation/deallocation would
+ * fragment it and make future large allocations fail.
+ */
+
+void *fsl_diu_alloc(unsigned long size, phys_addr_t *phys)
+{
+ void *virt;
+
+ pr_debug("size=%lu\n", size);
+
+ virt = (void *)__get_free_pages(GFP_DMA | __GFP_ZERO, get_order(size));
+ if (virt) {
+ *phys = virt_to_phys(virt);
+ pr_debug("virt %p, phys=%llx\n", virt, (uint64_t) *phys);
+ return virt;
+ }
+ if (!diu_ops.diu_mem) {
+ printk(KERN_INFO "%s: no diu_mem."
+ " To reserve more memory, put 'diufb=15M' "
+ "in the command line\n", __func__);
+ return NULL;
+ }
+
+ virt = (void *)rh_alloc(&diu_ops.diu_rh_info, size, "DIU");
+ if (virt) {
+ *phys = virt_to_bus(virt);
+ memset(virt, 0, size);
+ }
+
+	pr_debug("rh virt=%p phys=%llx\n", virt, (uint64_t) *phys);
+
+ return virt;
+}
+
+void fsl_diu_free(void *p, unsigned long size)
+{
+ pr_debug("p=%p size=%lu\n", p, size);
+
+ if (!p)
+ return;
+
+ if ((p >= diu_ops.diu_mem) &&
+ (p < (diu_ops.diu_mem + diu_ops.diu_size))) {
+ pr_debug("rh\n");
+ rh_free(&diu_ops.diu_rh_info, (unsigned long) p);
+ } else {
+ pr_debug("dma\n");
+ free_pages((unsigned long)p, get_order(size));
+ }
+}
+
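+/*
+ * Point the hardware at this plane/AOI's area descriptor (AD).  Plane 0
+ * has a single AOI; planes 1 and 2 each have two AOIs whose ADs are
+ * chained through next_ad, so enabling an AOI either installs its AD in
+ * the plane's descriptor register or links it behind an already-open AOI0.
+ */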
+static int fsl_diu_enable_panel(struct fb_info *info)
+{
+ struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
+ struct diu *hw = dr.diu_reg;
+ struct diu_ad *ad = mfbi->ad;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+ int res = 0;
+
+ pr_debug("enable_panel index %d\n", mfbi->index);
+ if (mfbi->type != MFB_TYPE_OFF) {
+ switch (mfbi->index) {
+ case 0: /* plane 0 */
+ if (hw->desc[0] != ad->paddr)
+ out_be32(&hw->desc[0], ad->paddr);
+ break;
+ case 1: /* plane 1 AOI 0 */
+ cmfbi = machine_data->fsl_diu_info[2]->par;
+ if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
+ if (cmfbi->count > 0) /* AOI1 open */
+ ad->next_ad =
+ cpu_to_le32(cmfbi->ad->paddr);
+ else
+ ad->next_ad = 0;
+ out_be32(&hw->desc[1], ad->paddr);
+ }
+ break;
+ case 3: /* plane 2 AOI 0 */
+ cmfbi = machine_data->fsl_diu_info[4]->par;
+ if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
+ if (cmfbi->count > 0) /* AOI1 open */
+ ad->next_ad =
+ cpu_to_le32(cmfbi->ad->paddr);
+ else
+ ad->next_ad = 0;
+ out_be32(&hw->desc[2], ad->paddr);
+ }
+ break;
+ case 2: /* plane 1 AOI 1 */
+ pmfbi = machine_data->fsl_diu_info[1]->par;
+ ad->next_ad = 0;
+ if (hw->desc[1] == machine_data->dummy_ad->paddr)
+ out_be32(&hw->desc[1], ad->paddr);
+ else /* AOI0 open */
+ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+ break;
+ case 4: /* plane 2 AOI 1 */
+ pmfbi = machine_data->fsl_diu_info[3]->par;
+ ad->next_ad = 0;
+ if (hw->desc[2] == machine_data->dummy_ad->paddr)
+ out_be32(&hw->desc[2], ad->paddr);
+ else /* AOI0 was open */
+ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else
+ res = -EINVAL;
+ return res;
+}
+
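+/*
+ * Unlink this plane/AOI's AD from the descriptor chain.  When the last
+ * AOI of a plane is closed, the plane's descriptor register is pointed
+ * at the dummy AD so that nothing is displayed.
+ */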
+static int fsl_diu_disable_panel(struct fb_info *info)
+{
+ struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
+ struct diu *hw = dr.diu_reg;
+ struct diu_ad *ad = mfbi->ad;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+ int res = 0;
+
+ switch (mfbi->index) {
+ case 0: /* plane 0 */
+ if (hw->desc[0] != machine_data->dummy_ad->paddr)
+ out_be32(&hw->desc[0],
+ machine_data->dummy_ad->paddr);
+ break;
+ case 1: /* plane 1 AOI 0 */
+ cmfbi = machine_data->fsl_diu_info[2]->par;
+ if (cmfbi->count > 0) /* AOI1 is open */
+ out_be32(&hw->desc[1], cmfbi->ad->paddr);
+ /* move AOI1 to the first */
+ else /* AOI1 was closed */
+ out_be32(&hw->desc[1],
+ machine_data->dummy_ad->paddr);
+ /* close AOI 0 */
+ break;
+ case 3: /* plane 2 AOI 0 */
+ cmfbi = machine_data->fsl_diu_info[4]->par;
+ if (cmfbi->count > 0) /* AOI1 is open */
+ out_be32(&hw->desc[2], cmfbi->ad->paddr);
+ /* move AOI1 to the first */
+ else /* AOI1 was closed */
+ out_be32(&hw->desc[2],
+ machine_data->dummy_ad->paddr);
+ /* close AOI 0 */
+ break;
+ case 2: /* plane 1 AOI 1 */
+ pmfbi = machine_data->fsl_diu_info[1]->par;
+ if (hw->desc[1] != ad->paddr) {
+ /* AOI1 is not the first in the chain */
+ if (pmfbi->count > 0)
+ /* AOI0 is open, must be the first */
+ pmfbi->ad->next_ad = 0;
+ } else /* AOI1 is the first in the chain */
+ out_be32(&hw->desc[1], machine_data->dummy_ad->paddr);
+ /* close AOI 1 */
+ break;
+ case 4: /* plane 2 AOI 1 */
+ pmfbi = machine_data->fsl_diu_info[3]->par;
+ if (hw->desc[2] != ad->paddr) {
+ /* AOI1 is not the first in the chain */
+ if (pmfbi->count > 0)
+ /* AOI0 is open, must be the first */
+ pmfbi->ad->next_ad = 0;
+ } else /* AOI1 is the first in the chain */
+ out_be32(&hw->desc[2], machine_data->dummy_ad->paddr);
+ /* close AOI 1 */
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+
+ return res;
+}
+
+static void enable_lcdc(struct fb_info *info)
+{
+ struct diu *hw = dr.diu_reg;
+ struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+
+ if (!machine_data->fb_enabled) {
+ out_be32(&hw->diu_mode, dr.mode);
+ machine_data->fb_enabled++;
+ }
+}
+
+static void disable_lcdc(struct fb_info *info)
+{
+ struct diu *hw = dr.diu_reg;
+ struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+
+ if (machine_data->fb_enabled) {
+ out_be32(&hw->diu_mode, 0);
+ machine_data->fb_enabled = 0;
+ }
+}
+
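+/*
+ * Clamp an AOI's size and display offset so that it stays inside the
+ * base plane and does not overlap the other AOI on the same plane
+ * (AOI0 must sit above AOI1).
+ */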
+static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+ int available_height, upper_aoi_bottom, index = mfbi->index;
+ int lower_aoi_is_open, upper_aoi_is_open;
+ __u32 base_plane_width, base_plane_height, upper_aoi_height;
+
+ base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
+ base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+
+ switch (index) {
+ case 0:
+ if (mfbi->x_aoi_d != 0)
+ mfbi->x_aoi_d = 0;
+ if (mfbi->y_aoi_d != 0)
+ mfbi->y_aoi_d = 0;
+ break;
+ case 1: /* AOI 0 */
+ case 3:
+ lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
+ lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
+ if (var->xres > base_plane_width)
+ var->xres = base_plane_width;
+ if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
+ mfbi->x_aoi_d = base_plane_width - var->xres;
+
+ if (lower_aoi_is_open)
+ available_height = lower_aoi_mfbi->y_aoi_d;
+ else
+ available_height = base_plane_height;
+ if (var->yres > available_height)
+ var->yres = available_height;
+ if ((mfbi->y_aoi_d + var->yres) > available_height)
+ mfbi->y_aoi_d = available_height - var->yres;
+ break;
+ case 2: /* AOI 1 */
+ case 4:
+ upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
+ upper_aoi_height =
+ machine_data->fsl_diu_info[index-1]->var.yres;
+ upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
+ upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
+ if (var->xres > base_plane_width)
+ var->xres = base_plane_width;
+ if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
+ mfbi->x_aoi_d = base_plane_width - var->xres;
+ if (mfbi->y_aoi_d < 0)
+ mfbi->y_aoi_d = 0;
+ if (upper_aoi_is_open) {
+ if (mfbi->y_aoi_d < upper_aoi_bottom)
+ mfbi->y_aoi_d = upper_aoi_bottom;
+ available_height = base_plane_height
+ - upper_aoi_bottom;
+ } else
+ available_height = base_plane_height;
+ if (var->yres > available_height)
+ var->yres = available_height;
+ if ((mfbi->y_aoi_d + var->yres) > base_plane_height)
+ mfbi->y_aoi_d = base_plane_height - var->yres;
+ break;
+ }
+}
+/*
+ * Check whether the hardware supports the state requested by var.  This
+ * function does not alter the hardware state!  If var is slightly off from
+ * what the hardware can support, we adjust the var passed in to what we
+ * can do.  If the hardware doesn't support the requested mode, the upper
+ * layers will return -EINVAL.
+ */
+static int fsl_diu_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ unsigned long htotal, vtotal;
+
+ pr_debug("check_var xres: %d\n", var->xres);
+ pr_debug("check_var yres: %d\n", var->yres);
+
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+
+ if (var->xoffset < 0)
+ var->xoffset = 0;
+
+ if (var->yoffset < 0)
+ var->yoffset = 0;
+
+ if (var->xoffset + info->var.xres > info->var.xres_virtual)
+ var->xoffset = info->var.xres_virtual - info->var.xres;
+
+ if (var->yoffset + info->var.yres > info->var.yres_virtual)
+ var->yoffset = info->var.yres_virtual - info->var.yres;
+
+ if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
+ (var->bits_per_pixel != 16))
+ var->bits_per_pixel = default_bpp;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ var->red.length = 5;
+ var->red.offset = 11;
+ var->red.msb_right = 0;
+
+ var->green.length = 6;
+ var->green.offset = 5;
+ var->green.msb_right = 0;
+
+ var->blue.length = 5;
+ var->blue.offset = 0;
+ var->blue.msb_right = 0;
+
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ var->transp.msb_right = 0;
+ break;
+ case 24:
+ var->red.length = 8;
+ var->red.offset = 0;
+ var->red.msb_right = 0;
+
+ var->green.length = 8;
+ var->green.offset = 8;
+ var->green.msb_right = 0;
+
+ var->blue.length = 8;
+ var->blue.offset = 16;
+ var->blue.msb_right = 0;
+
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ var->transp.msb_right = 0;
+ break;
+ case 32:
+ var->red.length = 8;
+ var->red.offset = 16;
+ var->red.msb_right = 0;
+
+ var->green.length = 8;
+ var->green.offset = 8;
+ var->green.msb_right = 0;
+
+ var->blue.length = 8;
+ var->blue.offset = 0;
+ var->blue.msb_right = 0;
+
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ var->transp.msb_right = 0;
+
+ break;
+ }
+	/* If the pixclock is outside the supported range, recompute it
+	 * for a 60Hz refresh rate, since that is supported by most
+	 * monitors.  Refer to Documentation/fb/ for calculations.
+	 */
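+	/*
+	 * Example: 1024x768 with the default-mode margins gives
+	 * htotal = 1344 and vtotal = 806, so a 60Hz dot clock is
+	 * 1344 * 806 * 60 ~= 65MHz, which KHZ2PICOS() turns into the
+	 * ~15385ps pixclock used in the mode database above.
+	 */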
+ if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
+ htotal = var->xres + var->right_margin + var->hsync_len +
+ var->left_margin;
+ vtotal = var->yres + var->lower_margin + var->vsync_len +
+ var->upper_margin;
+ var->pixclock = (vtotal * htotal * 6UL) / 100UL;
+ var->pixclock = KHZ2PICOS(var->pixclock);
+ pr_debug("pixclock set for 60Hz refresh = %u ps\n",
+ var->pixclock);
+ }
+
+ var->height = -1;
+ var->width = -1;
+ var->grayscale = 0;
+
+ /* Copy nonstd field to/from sync for fbset usage */
+ var->sync |= var->nonstd;
+ var->nonstd |= var->sync;
+
+ adjust_aoi_size_position(var, info);
+ return 0;
+}
+
+static void set_fix(struct fb_info *info)
+{
+ struct fb_fix_screeninfo *fix = &info->fix;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mfb_info *mfbi = info->par;
+
+	strncpy(fix->id, mfbi->id, sizeof(fix->id));
+ fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->accel = FB_ACCEL_NONE;
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+}
+
+static void update_lcdc(struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+ struct diu *hw;
+ int i, j;
+ char __iomem *cursor_base, *gamma_table_base;
+
+ u32 temp;
+
+ hw = dr.diu_reg;
+
+ if (mfbi->type == MFB_TYPE_OFF) {
+ fsl_diu_disable_panel(info);
+ return;
+ }
+
+ diu_ops.set_monitor_port(machine_data->monitor_port);
+ gamma_table_base = pool.gamma.vaddr;
+ cursor_base = pool.cursor.vaddr;
+ /* Prep for DIU init - gamma table, cursor table */
+
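+	/* Fill the gamma table with three identity ramps (R, G, B: 0..255) */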
+ for (i = 0; i <= 2; i++)
+ for (j = 0; j <= 255; j++)
+ *gamma_table_base++ = j;
+
+ diu_ops.set_gamma_table(machine_data->monitor_port, pool.gamma.vaddr);
+
+	pr_debug("update-lcdc: HW - %p; disabling DIU\n", hw);
+ disable_lcdc(info);
+
+ /* Program DIU registers */
+
+ out_be32(&hw->gamma, pool.gamma.paddr);
+ out_be32(&hw->cursor, pool.cursor.paddr);
+
+ out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
+ out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
+ out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
+ /* DISP SIZE */
+ pr_debug("DIU xres: %d\n", var->xres);
+ pr_debug("DIU yres: %d\n", var->yres);
+
+ out_be32(&hw->wb_size, 0); /* WB SIZE */
+ out_be32(&hw->wb_mem_addr, 0); /* WB MEM ADDR */
+
+ /* Horizontal and vertical configuration register */
+ temp = var->left_margin << 22 | /* BP_H */
+ var->hsync_len << 11 | /* PW_H */
+ var->right_margin; /* FP_H */
+
+ out_be32(&hw->hsyn_para, temp);
+
+ temp = var->upper_margin << 22 | /* BP_V */
+ var->vsync_len << 11 | /* PW_V */
+ var->lower_margin; /* FP_V */
+
+ out_be32(&hw->vsyn_para, temp);
+
+ pr_debug("DIU right_margin - %d\n", var->right_margin);
+ pr_debug("DIU left_margin - %d\n", var->left_margin);
+ pr_debug("DIU hsync_len - %d\n", var->hsync_len);
+ pr_debug("DIU upper_margin - %d\n", var->upper_margin);
+ pr_debug("DIU lower_margin - %d\n", var->lower_margin);
+ pr_debug("DIU vsync_len - %d\n", var->vsync_len);
+ pr_debug("DIU HSYNC - 0x%08x\n", hw->hsyn_para);
+ pr_debug("DIU VSYNC - 0x%08x\n", hw->vsyn_para);
+
+ diu_ops.set_pixel_clock(var->pixclock);
+
+ out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
+ out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
+ out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
+ out_be32(&hw->plut, 0x01F5F666);
+
+ /* Enable the DIU */
+ enable_lcdc(info);
+}
+
+static int map_video_memory(struct fb_info *info)
+{
+ phys_addr_t phys;
+
+ pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
+ pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
+ pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+
+ info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
+ info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+	if (!info->screen_base) {
+ printk(KERN_ERR "Unable to allocate fb memory\n");
+ return -ENOMEM;
+ }
+ info->fix.smem_start = (unsigned long) phys;
+ info->screen_size = info->fix.smem_len;
+
+ pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
+ info->fix.smem_start,
+ info->fix.smem_len);
+ pr_debug("screen base %p\n", info->screen_base);
+
+ return 0;
+}
+
+static void unmap_video_memory(struct fb_info *info)
+{
+ fsl_diu_free(info->screen_base, info->fix.smem_len);
+	info->screen_base = NULL;
+ info->fix.smem_start = 0;
+ info->fix.smem_len = 0;
+}
+
+/*
+ * Using the fb_var_screeninfo in fb_info we set the resolution of this
+ * particular framebuffer. This function alters the fb_fix_screeninfo stored
+ * in fb_info. It does not alter var in fb_info since we are using that
+ * data. This means we depend on the data in var inside fb_info to be
+ * supported by the hardware. fsl_diu_check_var is always called before
+ * fsl_diu_set_par to ensure this.
+ */
+static int fsl_diu_set_par(struct fb_info *info)
+{
+ unsigned long len;
+ struct fb_var_screeninfo *var = &info->var;
+ struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *machine_data = mfbi->parent;
+ struct diu_ad *ad = mfbi->ad;
+ struct diu *hw;
+
+ hw = dr.diu_reg;
+
+ set_fix(info);
+ mfbi->cursor_reset = 1;
+
+ len = info->var.yres_virtual * info->fix.line_length;
+ /* Alloc & dealloc each time resolution/bpp change */
+ if (len != info->fix.smem_len) {
+ if (info->fix.smem_start)
+ unmap_video_memory(info);
+ pr_debug("SET PAR: smem_len = %d\n", info->fix.smem_len);
+
+ /* Memory allocation for framebuffer */
+ if (map_video_memory(info)) {
+ printk(KERN_ERR "Unable to allocate fb memory 1\n");
+ return -ENOMEM;
+ }
+ }
+
+ ad->pix_fmt =
+ diu_ops.get_pixel_format(var->bits_per_pixel,
+ machine_data->monitor_port);
+ ad->addr = cpu_to_le32(info->fix.smem_start);
+ ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) |
+ var->xres) | mfbi->g_alpha;
+ /* fix me. AOI should not be greater than display size */
+ ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres);
+ ad->offset_xyi = 0;
+ ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
+
+ /* Disable chroma keying function */
+ ad->ckmax_r = 0;
+ ad->ckmax_g = 0;
+ ad->ckmax_b = 0;
+
+ ad->ckmin_r = 255;
+ ad->ckmin_g = 255;
+ ad->ckmin_b = 255;
+
+ if (mfbi->index == 0)
+ update_lcdc(info);
+ return 0;
+}
+
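+/*
+ * Scale a 16-bit color component down to 'width' bits, rounding to the
+ * nearest representable value: (val * (2^width - 1) + 0x7FFF) >> 16.
+ */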
+static inline __u32 CNVT_TOHW(__u32 val, __u32 width)
+{
+ return ((val<<width) + 0x7FFF - val)>>16;
+}
+
+/*
+ * Set a single color register. The values supplied have a 16-bit magnitude
+ * which needs to be scaled in this function for the hardware. Things to take
+ * into consideration are how many color registers, if any, are supported with
+ * the current color visual. With truecolor mode no color palettes are
+ * supported; instead a pseudo palette is created, which we store in
+ * pseudo_palette in struct fb_info. For pseudocolor mode we have a limited
+ * color palette.
+ */
+static int fsl_diu_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp, struct fb_info *info)
+{
+ int ret = 1;
+
+ /*
+ * If greyscale is true, then we convert the RGB value
+ * to greyscale no matter what visual we are using.
+ */
+ if (info->var.grayscale)
+ red = green = blue = (19595 * red + 38470 * green +
+ 7471 * blue) >> 16;
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+		/*
+		 * True color.  We encode the RGB value
+		 * according to the RGB bitfield information.
+		 */
+ if (regno < 16) {
+ u32 *pal = info->pseudo_palette;
+ u32 v;
+
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ pal[regno] = v;
+ ret = 0;
+ }
+ break;
+ case FB_VISUAL_STATIC_PSEUDOCOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Pan (or wrap, depending on the `vmode' field) the display using the
+ * 'xoffset' and 'yoffset' fields of the 'var' structure. If the values
+ * don't fit, return -EINVAL.
+ */
+static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if ((info->var.xoffset == var->xoffset) &&
+ (info->var.yoffset == var->yoffset))
+ return 0; /* No change, do nothing */
+
+ if (var->xoffset < 0 || var->yoffset < 0
+ || var->xoffset + info->var.xres > info->var.xres_virtual
+ || var->yoffset + info->var.yres > info->var.yres_virtual)
+ return -EINVAL;
+
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+
+ return 0;
+}
+
+/*
+ * Blank the screen if blank_mode != 0, else unblank. Return 0 if blanking
+ * succeeded, != 0 if un-/blanking failed.
+ * blank_mode == 2: suspend vsync
+ * blank_mode == 3: suspend hsync
+ * blank_mode == 4: powerdown
+ */
+static int fsl_diu_blank(int blank_mode, struct fb_info *info)
+{
+ struct mfb_info *mfbi = info->par;
+
+ mfbi->blank = blank_mode;
+
+ switch (blank_mode) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* FIXME: fixes to enable_panel and enable lcdc needed */
+ case FB_BLANK_NORMAL:
+ /* fsl_diu_disable_panel(info);*/
+ break;
+ case FB_BLANK_POWERDOWN:
+ /* disable_lcdc(info); */
+ break;
+ case FB_BLANK_UNBLANK:
+ /* fsl_diu_enable_panel(info);*/
+ break;
+ }
+
+ return 0;
+}
+
+static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mfb_info *mfbi = info->par;
+ struct diu_ad *ad = mfbi->ad;
+ struct mfb_chroma_key ck;
+ unsigned char global_alpha;
+ struct aoi_display_offset aoi_d;
+ __u32 pix_fmt;
+ void __user *buf = (void __user *)arg;
+
+ if (!arg)
+ return -EINVAL;
+ switch (cmd) {
+ case MFB_SET_PIXFMT:
+ if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
+ return -EFAULT;
+ ad->pix_fmt = pix_fmt;
+ pr_debug("Set pixel format to 0x%08x\n", ad->pix_fmt);
+ break;
+ case MFB_GET_PIXFMT:
+ pix_fmt = ad->pix_fmt;
+ if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
+ return -EFAULT;
+ pr_debug("get pixel format 0x%08x\n", ad->pix_fmt);
+ break;
+ case MFB_SET_AOID:
+ if (copy_from_user(&aoi_d, buf, sizeof(aoi_d)))
+ return -EFAULT;
+ mfbi->x_aoi_d = aoi_d.x_aoi_d;
+ mfbi->y_aoi_d = aoi_d.y_aoi_d;
+ pr_debug("set AOI display offset of index %d to (%d,%d)\n",
+ mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
+ fsl_diu_check_var(&info->var, info);
+ fsl_diu_set_par(info);
+ break;
+ case MFB_GET_AOID:
+ aoi_d.x_aoi_d = mfbi->x_aoi_d;
+ aoi_d.y_aoi_d = mfbi->y_aoi_d;
+ if (copy_to_user(buf, &aoi_d, sizeof(aoi_d)))
+ return -EFAULT;
+ pr_debug("get AOI display offset of index %d (%d,%d)\n",
+ mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
+ break;
+ case MFB_GET_ALPHA:
+ global_alpha = mfbi->g_alpha;
+ if (copy_to_user(buf, &global_alpha, sizeof(global_alpha)))
+ return -EFAULT;
+ pr_debug("get global alpha of index %d\n", mfbi->index);
+ break;
+ case MFB_SET_ALPHA:
+ /* set panel information */
+ if (copy_from_user(&global_alpha, buf, sizeof(global_alpha)))
+ return -EFAULT;
+ ad->src_size_g_alpha = (ad->src_size_g_alpha & (~0xff)) |
+ (global_alpha & 0xff);
+ mfbi->g_alpha = global_alpha;
+ pr_debug("set global alpha for index %d\n", mfbi->index);
+ break;
+ case MFB_SET_CHROMA_KEY:
+		/* set panel chroma key information */
+ if (copy_from_user(&ck, buf, sizeof(ck)))
+ return -EFAULT;
+
+ if (ck.enable &&
+ (ck.red_max < ck.red_min ||
+ ck.green_max < ck.green_min ||
+ ck.blue_max < ck.blue_min))
+ return -EINVAL;
+
+ if (!ck.enable) {
+ ad->ckmax_r = 0;
+ ad->ckmax_g = 0;
+ ad->ckmax_b = 0;
+ ad->ckmin_r = 255;
+ ad->ckmin_g = 255;
+ ad->ckmin_b = 255;
+ } else {
+ ad->ckmax_r = ck.red_max;
+ ad->ckmax_g = ck.green_max;
+ ad->ckmax_b = ck.blue_max;
+ ad->ckmin_r = ck.red_min;
+ ad->ckmin_g = ck.green_min;
+ ad->ckmin_b = ck.blue_min;
+ }
+ pr_debug("set chroma key\n");
+ break;
+ case FBIOGET_GWINFO:
+ if (mfbi->type == MFB_TYPE_OFF)
+ return -ENODEV;
+ /* get graphic window information */
+ if (copy_to_user(buf, ad, sizeof(*ad)))
+ return -EFAULT;
+ break;
+ case FBIOGET_HWCINFO:
+ pr_debug("FBIOGET_HWCINFO:0x%08x\n", FBIOGET_HWCINFO);
+ break;
+ case FBIOPUT_MODEINFO:
+ pr_debug("FBIOPUT_MODEINFO:0x%08x\n", FBIOPUT_MODEINFO);
+ break;
+ case FBIOGET_DISPINFO:
+ pr_debug("FBIOGET_DISPINFO:0x%08x\n", FBIOGET_DISPINFO);
+ break;
+
+ default:
+ printk(KERN_ERR "Unknown ioctl command (0x%08X)\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+/* turn on fb if count == 1 */
+static int fsl_diu_open(struct fb_info *info, int user)
+{
+ struct mfb_info *mfbi = info->par;
+ int res = 0;
+
+ spin_lock(&diu_lock);
+ mfbi->count++;
+ if (mfbi->count == 1) {
+ pr_debug("open plane index %d\n", mfbi->index);
+ fsl_diu_check_var(&info->var, info);
+ res = fsl_diu_set_par(info);
+ if (res < 0)
+ mfbi->count--;
+ else {
+ res = fsl_diu_enable_panel(info);
+ if (res < 0)
+ mfbi->count--;
+ }
+ }
+
+ spin_unlock(&diu_lock);
+ return res;
+}
+
+/* turn off fb if count == 0 */
+static int fsl_diu_release(struct fb_info *info, int user)
+{
+ struct mfb_info *mfbi = info->par;
+ int res = 0;
+
+ spin_lock(&diu_lock);
+ mfbi->count--;
+ if (mfbi->count == 0) {
+ pr_debug("release plane index %d\n", mfbi->index);
+ res = fsl_diu_disable_panel(info);
+ if (res < 0)
+ mfbi->count++;
+ }
+ spin_unlock(&diu_lock);
+ return res;
+}
+
+static struct fb_ops fsl_diu_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = fsl_diu_check_var,
+ .fb_set_par = fsl_diu_set_par,
+ .fb_setcolreg = fsl_diu_setcolreg,
+ .fb_blank = fsl_diu_blank,
+ .fb_pan_display = fsl_diu_pan_display,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_ioctl = fsl_diu_ioctl,
+ .fb_open = fsl_diu_open,
+ .fb_release = fsl_diu_release,
+};
+
+static int init_fbinfo(struct fb_info *info)
+{
+ struct mfb_info *mfbi = info->par;
+
+ info->device = NULL;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->fbops = &fsl_diu_ops;
+ info->flags = FBINFO_FLAG_DEFAULT;
+ info->pseudo_palette = &mfbi->pseudo_palette;
+
+ /* Allocate colormap */
+ fb_alloc_cmap(&info->cmap, 16, 0);
+ return 0;
+}
+
+static int install_fb(struct fb_info *info)
+{
+ int rc;
+ struct mfb_info *mfbi = info->par;
+ const char *aoi_mode, *init_aoi_mode = "320x240";
+
+ if (init_fbinfo(info))
+ return -EINVAL;
+
+ if (mfbi->index == 0) /* plane 0 */
+ aoi_mode = fb_mode;
+ else
+ aoi_mode = init_aoi_mode;
+ pr_debug("mode used = %s\n", aoi_mode);
+ rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
+ ARRAY_SIZE(fsl_diu_mode_db), &fsl_diu_default_mode, default_bpp);
+
+ switch (rc) {
+ case 1:
+ pr_debug("using mode specified in @mode\n");
+ break;
+ case 2:
+ pr_debug("using mode specified in @mode "
+ "with ignored refresh rate\n");
+ break;
+ case 3:
+		pr_debug("using default mode\n");
+ break;
+ case 4:
+ pr_debug("using mode from list\n");
+ break;
+	default:
+		pr_debug("rc = %d\n", rc);
+		pr_debug("failed to find mode\n");
+		return -EINVAL;
+ }
+
+ pr_debug("xres_virtual %d\n", info->var.xres_virtual);
+ pr_debug("bits_per_pixel %d\n", info->var.bits_per_pixel);
+
+ pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
+ pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+
+ if (mfbi->type == MFB_TYPE_OFF)
+ mfbi->blank = FB_BLANK_NORMAL;
+ else
+ mfbi->blank = FB_BLANK_UNBLANK;
+
+ if (fsl_diu_check_var(&info->var, info)) {
+ printk(KERN_ERR "fb_check_var failed");
+ fb_dealloc_cmap(&info->cmap);
+ return -EINVAL;
+ }
+
+ if (fsl_diu_set_par(info)) {
+ printk(KERN_ERR "fb_set_par failed");
+ fb_dealloc_cmap(&info->cmap);
+ return -EINVAL;
+ }
+
+ if (register_framebuffer(info) < 0) {
+ printk(KERN_ERR "register_framebuffer failed");
+ unmap_video_memory(info);
+ fb_dealloc_cmap(&info->cmap);
+ return -EINVAL;
+ }
+
+ mfbi->registered = 1;
+ printk(KERN_INFO "fb%d: %s fb device registered successfully.\n",
+ info->node, info->fix.id);
+
+ return 0;
+}
+
+static void __exit uninstall_fb(struct fb_info *info)
+{
+ struct mfb_info *mfbi = info->par;
+
+ if (!mfbi->registered)
+ return;
+
+ unregister_framebuffer(info);
+ unmap_video_memory(info);
+	fb_dealloc_cmap(&info->cmap);
+
+ mfbi->registered = 0;
+}
+
+static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
+{
+ struct diu *hw = dr.diu_reg;
+ unsigned int status = in_be32(&hw->int_status);
+
+ if (status) {
+ /* This is the workaround for underrun */
+ if (status & INT_UNDRUN) {
+ out_be32(&hw->diu_mode, 0);
+			pr_debug("Err: DIU underrun!\n");
+ udelay(1);
+ out_be32(&hw->diu_mode, 1);
+ }
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+ else if (status & INT_VSYNC) {
+ unsigned int i;
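+
+			/*
+			 * Zero the coherence buffer with dcbz.  The buffer
+			 * is 13/8 the d-cache size, so walking it displaces
+			 * every line from the (non-coherent) d-cache and
+			 * flushes pending framebuffer writes to memory.
+			 */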
+ for (i = 0; i < coherence_data_size;
+ i += d_cache_line_size)
+ __asm__ __volatile__ (
+ "dcbz 0, %[input]"
+ ::[input]"r"(&coherence_data[i]));
+ }
+#endif
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
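+/*
+ * Hook up the DIU interrupt handler and program the interrupt mask.
+ * A set bit in int_mask disables the corresponding interrupt source
+ * (free_irq_local() masks all five sources with 0x1f), so vsync is
+ * left enabled only on non-coherent-cache systems, where the handler
+ * uses it to flush the framebuffer from the d-cache.
+ */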
+static int request_irq_local(int irq)
+{
+ unsigned long status, ints;
+ struct diu *hw;
+ int ret;
+
+ hw = dr.diu_reg;
+
+ /* Read to clear the status */
+ status = in_be32(&hw->int_status);
+
+	ret = request_irq(irq, fsl_diu_isr, 0, "diu", NULL);
+ if (ret)
+ pr_info("Request diu IRQ failed.\n");
+ else {
+ ints = INT_PARERR | INT_LS_BF_VS;
+#if !defined(CONFIG_NOT_COHERENT_CACHE)
+ ints |= INT_VSYNC;
+#endif
+ if (dr.mode == MFB_MODE2 || dr.mode == MFB_MODE3)
+ ints |= INT_VSYNC_WB;
+
+ /* Read to clear the status */
+ status = in_be32(&hw->int_status);
+ out_be32(&hw->int_mask, ints);
+ }
+ return ret;
+}
+
+static void free_irq_local(int irq)
+{
+ struct diu *hw = dr.diu_reg;
+
+ /* Disable all LCDC interrupt */
+ out_be32(&hw->int_mask, 0x1f);
+
+	free_irq(irq, NULL);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Power management hooks. Note that we won't be called from IRQ context,
+ * unlike the blank functions above, so we may sleep.
+ */
+static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
+{
+ struct fsl_diu_data *machine_data;
+
+ machine_data = dev_get_drvdata(&ofdev->dev);
+ disable_lcdc(machine_data->fsl_diu_info[0]);
+
+ return 0;
+}
+
+static int fsl_diu_resume(struct of_device *ofdev)
+{
+ struct fsl_diu_data *machine_data;
+
+ machine_data = dev_get_drvdata(&ofdev->dev);
+ enable_lcdc(machine_data->fsl_diu_info[0]);
+
+ return 0;
+}
+
+#else
+#define fsl_diu_suspend NULL
+#define fsl_diu_resume NULL
+#endif /* CONFIG_PM */
+
+/*
+ * Allocate a DMA buffer whose physical address is aligned to bytes_align
+ * (8 for the 64-bit area descriptors, 32 for the gamma table and cursor);
+ * over-allocate by bytes_align and round the start address up.
+ */
+static int allocate_buf(struct diu_addr *buf, u32 size, u32 bytes_align)
+{
+ u32 offset, ssize;
+ u32 mask;
+ dma_addr_t paddr = 0;
+
+ ssize = size + bytes_align;
+	buf->vaddr = dma_alloc_coherent(NULL, ssize, &paddr,
+					GFP_DMA | __GFP_ZERO);
+ if (!buf->vaddr)
+ return -ENOMEM;
+
+ buf->paddr = (__u32) paddr;
+
+	/* round the physical start address up to the requested alignment */
+	mask = bytes_align - 1;
+	offset = (u32)buf->paddr & mask;
+	if (offset) {
+		buf->offset = bytes_align - offset;
+		buf->paddr = (u32)buf->paddr + buf->offset;
+	} else
+		buf->offset = 0;
+ return 0;
+}
+
+static void free_buf(struct diu_addr *buf, u32 size, u32 bytes_align)
+{
+	dma_free_coherent(NULL, size + bytes_align,
+ buf->vaddr, (buf->paddr - buf->offset));
+ return;
+}
+
+static ssize_t store_monitor(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int old_monitor_port;
+ unsigned long val;
+ struct fsl_diu_data *machine_data =
+ container_of(attr, struct fsl_diu_data, dev_attr);
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+ old_monitor_port = machine_data->monitor_port;
+ machine_data->monitor_port = diu_ops.set_sysfs_monitor_port(val);
+
+ if (old_monitor_port != machine_data->monitor_port) {
+		/* All AOIs need to adjust their pixel format;
+		 * fsl_diu_set_par() only changes the pixel format here,
+		 * so it is unlikely to fail. */
+ fsl_diu_set_par(machine_data->fsl_diu_info[0]);
+ fsl_diu_set_par(machine_data->fsl_diu_info[1]);
+ fsl_diu_set_par(machine_data->fsl_diu_info[2]);
+ fsl_diu_set_par(machine_data->fsl_diu_info[3]);
+ fsl_diu_set_par(machine_data->fsl_diu_info[4]);
+ }
+ return count;
+}
+
+static ssize_t show_monitor(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_diu_data *machine_data =
+ container_of(attr, struct fsl_diu_data, dev_attr);
+ return diu_ops.show_monitor_port(machine_data->monitor_port, buf);
+}
+
+static int fsl_diu_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct device_node *np = ofdev->node;
+ struct mfb_info *mfbi;
+ phys_addr_t dummy_ad_addr;
+ int ret, i, error = 0;
+ struct resource res;
+ struct fsl_diu_data *machine_data;
+
+ machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
+ if (!machine_data)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
+ machine_data->fsl_diu_info[i] =
+ framebuffer_alloc(sizeof(struct mfb_info), &ofdev->dev);
+ if (!machine_data->fsl_diu_info[i]) {
+ dev_err(&ofdev->dev, "cannot allocate memory\n");
+ ret = -ENOMEM;
+ goto error2;
+ }
+ mfbi = machine_data->fsl_diu_info[i]->par;
+ memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
+ mfbi->parent = machine_data;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&ofdev->dev, "could not obtain DIU address\n");
+ goto error;
+ }
+	if (!res.start) {
+		dev_err(&ofdev->dev, "invalid DIU address\n");
+		ret = -EINVAL;
+		goto error;
+	}
+ dev_dbg(&ofdev->dev, "%s, res.start: 0x%08x\n", __func__, res.start);
+
+ dr.diu_reg = ioremap(res.start, sizeof(struct diu));
+ if (!dr.diu_reg) {
+ dev_err(&ofdev->dev, "Err: can't map DIU registers!\n");
+ ret = -EFAULT;
+ goto error2;
+ }
+
+ out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU anyway*/
+
+ /* Get the IRQ of the DIU */
+ machine_data->irq = irq_of_parse_and_map(np, 0);
+
+ if (!machine_data->irq) {
+ dev_err(&ofdev->dev, "could not get DIU IRQ\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ machine_data->monitor_port = monitor_port;
+
+ /* Area descriptor memory pool aligns to 64-bit boundary */
+	if (allocate_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+ /* Get memory for Gamma Table - 32-byte aligned memory */
+ if (allocate_buf(&pool.gamma, 768, 32)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* For performance, cursor bitmap buffer aligns to 32-byte boundary */
+ if (allocate_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
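+	/*
+	 * The last AD in the pool serves as a dummy descriptor: it scans
+	 * out a tiny scratch AOI and is installed in all three plane
+	 * descriptor registers until a real AOI is opened.
+	 */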
+ i = ARRAY_SIZE(machine_data->fsl_diu_info);
+ machine_data->dummy_ad = (struct diu_ad *)
+ ((u32)pool.ad.vaddr + pool.ad.offset) + i;
+ machine_data->dummy_ad->paddr = pool.ad.paddr +
+ i * sizeof(struct diu_ad);
+ machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
+ if (!machine_data->dummy_aoi_virt) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ machine_data->dummy_ad->addr = cpu_to_le32(dummy_ad_addr);
+ machine_data->dummy_ad->pix_fmt = 0x88882317;
+ machine_data->dummy_ad->src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
+ machine_data->dummy_ad->aoi_size = cpu_to_le32((4 << 16) | 2);
+ machine_data->dummy_ad->offset_xyi = 0;
+ machine_data->dummy_ad->offset_xyd = 0;
+ machine_data->dummy_ad->next_ad = 0;
+
+ out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr);
+ out_be32(&dr.diu_reg->desc[1], machine_data->dummy_ad->paddr);
+ out_be32(&dr.diu_reg->desc[2], machine_data->dummy_ad->paddr);
+
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
+ machine_data->fsl_diu_info[i]->fix.smem_start = 0;
+ mfbi = machine_data->fsl_diu_info[i]->par;
+ mfbi->ad = (struct diu_ad *)((u32)pool.ad.vaddr
+ + pool.ad.offset) + i;
+ mfbi->ad->paddr = pool.ad.paddr + i * sizeof(struct diu_ad);
+ ret = install_fb(machine_data->fsl_diu_info[i]);
+ if (ret) {
+ dev_err(&ofdev->dev,
+ "Failed to register framebuffer %d\n",
+ i);
+ goto error;
+ }
+ }
+
+	if (request_irq_local(machine_data->irq)) {
+		dev_err(machine_data->fsl_diu_info[0]->dev,
+			"could not request irq for diu\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+ machine_data->dev_attr.attr.name = "monitor";
+ machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
+ machine_data->dev_attr.show = show_monitor;
+ machine_data->dev_attr.store = store_monitor;
+ error = device_create_file(machine_data->fsl_diu_info[0]->dev,
+ &machine_data->dev_attr);
+ if (error) {
+ dev_err(machine_data->fsl_diu_info[0]->dev,
+ "could not create sysfs %s file\n",
+ machine_data->dev_attr.attr.name);
+ }
+
+ dev_set_drvdata(&ofdev->dev, machine_data);
+ return 0;
+
+error:
+ for (i = ARRAY_SIZE(machine_data->fsl_diu_info);
+ i > 0; i--)
+ uninstall_fb(machine_data->fsl_diu_info[i - 1]);
+ if (pool.ad.vaddr)
+ free_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
+ if (pool.gamma.vaddr)
+ free_buf(&pool.gamma, 768, 32);
+ if (pool.cursor.vaddr)
+ free_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32);
+ if (machine_data->dummy_aoi_virt)
+ fsl_diu_free(machine_data->dummy_aoi_virt, 64);
+ iounmap(dr.diu_reg);
+
+error2:
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+ if (machine_data->fsl_diu_info[i])
+ framebuffer_release(machine_data->fsl_diu_info[i]);
+ kfree(machine_data);
+
+ return ret;
+}
+
+
+static int fsl_diu_remove(struct of_device *ofdev)
+{
+ struct fsl_diu_data *machine_data;
+ int i;
+
+ machine_data = dev_get_drvdata(&ofdev->dev);
+ disable_lcdc(machine_data->fsl_diu_info[0]);
+ free_irq_local(machine_data->irq);
+ for (i = ARRAY_SIZE(machine_data->fsl_diu_info); i > 0; i--)
+ uninstall_fb(machine_data->fsl_diu_info[i - 1]);
+ if (pool.ad.vaddr)
+ free_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
+ if (pool.gamma.vaddr)
+ free_buf(&pool.gamma, 768, 32);
+ if (pool.cursor.vaddr)
+ free_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32);
+ if (machine_data->dummy_aoi_virt)
+ fsl_diu_free(machine_data->dummy_aoi_virt, 64);
+ iounmap(dr.diu_reg);
+ for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+ if (machine_data->fsl_diu_info[i])
+ framebuffer_release(machine_data->fsl_diu_info[i]);
+ kfree(machine_data);
+
+ return 0;
+}
+
+#ifndef MODULE
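+/*
+ * Parse driver options passed on the kernel command line, e.g.
+ *
+ *	video=fslfb:1280x1024-32@60,monitor=0,bpp=32
+ *
+ * (mode strings follow the usual fb_find_mode() syntax).
+ */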
+static int __init fsl_diu_setup(char *options)
+{
+ char *opt;
+ unsigned long val;
+
+ if (!options || !*options)
+ return 0;
+
+ while ((opt = strsep(&options, ",")) != NULL) {
+ if (!*opt)
+ continue;
+ if (!strncmp(opt, "monitor=", 8)) {
+ if (!strict_strtoul(opt + 8, 10, &val) && (val <= 2))
+ monitor_port = val;
+ } else if (!strncmp(opt, "bpp=", 4)) {
+ if (!strict_strtoul(opt + 4, 10, &val))
+ default_bpp = val;
+ } else
+ fb_mode = opt;
+ }
+
+ return 0;
+}
+#endif
+
+static struct of_device_id fsl_diu_match[] = {
+ {
+ .compatible = "fsl,diu",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_diu_match);
+
+static struct of_platform_driver fsl_diu_driver = {
+ .owner = THIS_MODULE,
+ .name = "fsl_diu",
+ .match_table = fsl_diu_match,
+ .probe = fsl_diu_probe,
+ .remove = fsl_diu_remove,
+ .suspend = fsl_diu_suspend,
+ .resume = fsl_diu_resume,
+};
+
+static int __init fsl_diu_init(void)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ struct device_node *np;
+ const u32 *prop;
+#endif
+ int ret;
+#ifndef MODULE
+ char *option;
+
+ /*
+ * For kernel boot options (in 'video=xxxfb:<options>' format)
+ */
+ if (fb_get_options("fslfb", &option))
+ return -ENODEV;
+ fsl_diu_setup(option);
+#endif
+ printk(KERN_INFO "Freescale DIU driver\n");
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ np = of_find_node_by_type(NULL, "cpu");
+ if (!np) {
+ printk(KERN_ERR "Err: can't find device node 'cpu'\n");
+ return -ENODEV;
+ }
+
+ prop = of_get_property(np, "d-cache-size", NULL);
+ if (prop == NULL)
+ return -ENODEV;
+
+	/*
+	 * Freescale PLRU requires 13/8 times the cache size to do a
+	 * proper displacement flush.
+	 */
+ coherence_data_size = *prop * 13;
+ coherence_data_size /= 8;
+
+ prop = of_get_property(np, "d-cache-line-size", NULL);
+ if (prop == NULL)
+ return -ENODEV;
+ d_cache_line_size = *prop;
+
+ of_node_put(np);
+ coherence_data = vmalloc(coherence_data_size);
+ if (!coherence_data)
+ return -ENOMEM;
+#endif
+ ret = of_register_platform_driver(&fsl_diu_driver);
+ if (ret) {
+ printk(KERN_ERR
+ "fsl-diu: failed to register platform driver\n");
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+ vfree(coherence_data);
+#endif
+ iounmap(dr.diu_reg);
+ }
+ return ret;
+}
+
+static void __exit fsl_diu_exit(void)
+{
+ of_unregister_platform_driver(&fsl_diu_driver);
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+ vfree(coherence_data);
+#endif
+}
+
+module_init(fsl_diu_init);
+module_exit(fsl_diu_exit);
+
+MODULE_AUTHOR("York Sun <yorksun@freescale.com>");
+MODULE_DESCRIPTION("Freescale DIU framebuffer driver");
+MODULE_LICENSE("GPL");
+
+module_param_named(mode, fb_mode, charp, 0);
+MODULE_PARM_DESC(mode,
+ "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
+module_param_named(bpp, default_bpp, ulong, 0);
+MODULE_PARM_DESC(bpp, "Specify bits-per-pixel if not specified in mode");
+module_param_named(monitor, monitor_port, int, 0);
+MODULE_PARM_DESC(monitor,
+ "Specify the monitor port (0, 1 or 2) if supported by the platform");
+
diff --git a/drivers/video/fsl-diu-fb.h b/drivers/video/fsl-diu-fb.h
new file mode 100644
index 000000000000..fc295d7ea463
--- /dev/null
+++ b/drivers/video/fsl-diu-fb.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Freescale DIU Frame Buffer device driver
+ *
+ * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ * Paul Widmer <paul.widmer@freescale.com>
+ * Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
+ * York Sun <yorksun@freescale.com>
+ *
+ * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __FSL_DIU_FB_H__
+#define __FSL_DIU_FB_H__
+
+/* Arbitrary threshold to determine the allocation method
+ * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
+ */
+#define MEM_ALLOC_THRESHOLD (1024*768*4+32)
+/* Minimum value that the pixel clock can be set to, in picoseconds.
+ * This is determined by platform clock/3, where the minimum platform
+ * clock is 533MHz; this gives 5629 picoseconds.
+ */
+#define MIN_PIX_CLK 5629
+#define MAX_PIX_CLK 96096
+
+#include <linux/types.h>
+
+struct mfb_alpha {
+ int enable;
+ int alpha;
+};
+
+struct mfb_chroma_key {
+ int enable;
+ __u8 red_max;
+ __u8 green_max;
+ __u8 blue_max;
+ __u8 red_min;
+ __u8 green_min;
+ __u8 blue_min;
+};
+
+struct aoi_display_offset {
+ int x_aoi_d;
+ int y_aoi_d;
+};
+
+#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key)
+#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
+#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8)
+
+#define MFB_SET_ALPHA 0x80014d00
+#define MFB_GET_ALPHA 0x40014d00
+#define MFB_SET_AOID 0x80084d04
+#define MFB_GET_AOID 0x40084d04
+#define MFB_SET_PIXFMT 0x80014d08
+#define MFB_GET_PIXFMT 0x40014d08
+
+#define FBIOGET_GWINFO 0x46E0
+#define FBIOPUT_GWINFO 0x46E1
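+
+/*
+ * Example use from user space (a sketch; assumes /dev/fb2 is the
+ * plane-1 AOI1 device):
+ *
+ *	struct aoi_display_offset aoi = { .x_aoi_d = 0, .y_aoi_d = 480 };
+ *
+ *	ioctl(fd, MFB_SET_AOID, &aoi);
+ */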
+
+#ifdef __KERNEL__
+#include <linux/spinlock.h>
+
+/*
+ * These are the fields of the area descriptor (in DDR memory) for every plane
+ */
+struct diu_ad {
+ /* Word 0(32-bit) in DDR memory */
+/* __u16 comp; */
+/* __u16 pixel_s:2; */
+/* __u16 pallete:1; */
+/* __u16 red_c:2; */
+/* __u16 green_c:2; */
+/* __u16 blue_c:2; */
+/* __u16 alpha_c:3; */
+/* __u16 byte_f:1; */
+/* __u16 res0:3; */
+
+ __be32 pix_fmt; /* hard coding pixel format */
+
+ /* Word 1(32-bit) in DDR memory */
+ __le32 addr;
+
+ /* Word 2(32-bit) in DDR memory */
+/* __u32 delta_xs:11; */
+/* __u32 res1:1; */
+/* __u32 delta_ys:11; */
+/* __u32 res2:1; */
+/* __u32 g_alpha:8; */
+ __le32 src_size_g_alpha;
+
+ /* Word 3(32-bit) in DDR memory */
+/* __u32 delta_xi:11; */
+/* __u32 res3:5; */
+/* __u32 delta_yi:11; */
+/* __u32 res4:3; */
+/* __u32 flip:2; */
+ __le32 aoi_size;
+
+ /* Word 4(32-bit) in DDR memory */
+ /*__u32 offset_xi:11;
+ __u32 res5:5;
+ __u32 offset_yi:11;
+ __u32 res6:5;
+ */
+ __le32 offset_xyi;
+
+ /* Word 5(32-bit) in DDR memory */
+ /*__u32 offset_xd:11;
+ __u32 res7:5;
+ __u32 offset_yd:11;
+ __u32 res8:5; */
+ __le32 offset_xyd;
+
+
+ /* Word 6(32-bit) in DDR memory */
+ __u8 ckmax_r;
+ __u8 ckmax_g;
+ __u8 ckmax_b;
+ __u8 res9;
+
+ /* Word 7(32-bit) in DDR memory */
+ __u8 ckmin_r;
+ __u8 ckmin_g;
+ __u8 ckmin_b;
+ __u8 res10;
+/* __u32 res10:8; */
+
+ /* Word 8(32-bit) in DDR memory */
+ __le32 next_ad;
+
+ /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */
+ __u32 paddr;
+} __attribute__ ((packed));
+
+/* DIU register map */
+struct diu {
+ __be32 desc[3];
+ __be32 gamma;
+ __be32 pallete;
+ __be32 cursor;
+ __be32 curs_pos;
+ __be32 diu_mode;
+ __be32 bgnd;
+ __be32 bgnd_wb;
+ __be32 disp_size;
+ __be32 wb_size;
+ __be32 wb_mem_addr;
+ __be32 hsyn_para;
+ __be32 vsyn_para;
+ __be32 syn_pol;
+ __be32 thresholds;
+ __be32 int_status;
+ __be32 int_mask;
+ __be32 colorbar[8];
+ __be32 filling;
+ __be32 plut;
+} __attribute__ ((packed));
+
+struct diu_hw {
+ struct diu *diu_reg;
+ spinlock_t reg_lock;
+
+ __u32 mode; /* DIU operation mode */
+};
+
+struct diu_addr {
+ __u8 __iomem *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ __u32 offset;
+};
+
+struct diu_pool {
+ struct diu_addr ad;
+ struct diu_addr gamma;
+ struct diu_addr pallete;
+ struct diu_addr cursor;
+};
+
+#define FSL_DIU_BASE_OFFSET 0x2C000 /* Offset of DIU */
+#define INT_LCDC 64 /* DIU interrupt number */
+
+#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
+ /* 1 for plane 0, 2 for plane 1&2 each */
+
+/* Minimum X and Y resolutions */
+#define MIN_XRES 64
+#define MIN_YRES 64
+
+/* HW cursor parameters */
+#define MAX_CURS 32
+
+/* Modes of operation of DIU */
+#define MFB_MODE0 0 /* DIU off */
+#define MFB_MODE1 1 /* All three planes output to display */
+#define MFB_MODE2 2 /* Plane 1 to display, planes 2+3 written back*/
+#define MFB_MODE3 3 /* All three planes written back to memory */
+#define MFB_MODE4 4 /* Color bar generation */
+
+/* INT_STATUS/INT_MASK field descriptions */
+#define INT_VSYNC 0x01 /* Vsync interrupt */
+#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */
+#define INT_UNDRUN 0x04 /* Under run exception interrupt */
+#define INT_PARERR 0x08 /* Display parameters error interrupt */
+#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
+
+/* Panels' operation modes */
+#define MFB_TYPE_OUTPUT 0 /* Panel output to display */
+#define MFB_TYPE_OFF 1 /* Panel off */
+#define MFB_TYPE_WB 2 /* Panel written back to memory */
+#define MFB_TYPE_TEST 3 /* Panel generate color bar */
+
+#endif /* __KERNEL__ */
+#endif /* __FSL_DIU_FB_H__ */
diff --git a/drivers/video/geode/Kconfig b/drivers/video/geode/Kconfig
index 7608429b3943..c5d8ba4b9fc3 100644
--- a/drivers/video/geode/Kconfig
+++ b/drivers/video/geode/Kconfig
@@ -38,26 +38,6 @@ config FB_GEODE_GX
If unsure, say N.
-config FB_GEODE_GX_SET_FBSIZE
- bool "Manually specify the Geode GX framebuffer size"
- depends on FB_GEODE_GX
- default n
- ---help---
- If you want to manually specify the size of your GX framebuffer,
- say Y here, otherwise say N to dynamically probe it.
-
- Say N unless you know what you are doing.
-
-config FB_GEODE_GX_FBSIZE
- hex "Size of the GX framebuffer, in bytes"
- depends on FB_GEODE_GX_SET_FBSIZE
- default "0x1600000"
- ---help---
- Specify the size of the GX framebuffer. Normally, you will
- want this to be MB aligned. Common values are 0x80000 (8MB)
- and 0x1600000 (16MB). Don't change this unless you know what
- you are doing
-
config FB_GEODE_GX1
tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)"
depends on FB && FB_GEODE && EXPERIMENTAL
diff --git a/drivers/video/geode/Makefile b/drivers/video/geode/Makefile
index 957304b45fba..5c98da126883 100644
--- a/drivers/video/geode/Makefile
+++ b/drivers/video/geode/Makefile
@@ -5,5 +5,5 @@ obj-$(CONFIG_FB_GEODE_GX) += gxfb.o
obj-$(CONFIG_FB_GEODE_LX) += lxfb.o
gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o
-gxfb-objs := gxfb_core.o display_gx.o video_gx.o
+gxfb-objs := gxfb_core.o display_gx.o video_gx.o suspend_gx.o
lxfb-objs := lxfb_core.o lxfb_ops.o
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c
index 0f16e4bffc6c..e759895bf3d3 100644
--- a/drivers/video/geode/display_gx.c
+++ b/drivers/video/geode/display_gx.c
@@ -17,31 +17,40 @@
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/delay.h>
+#include <asm/geode.h>
-#include "geodefb.h"
-#include "display_gx.h"
+#include "gxfb.h"
-#ifdef CONFIG_FB_GEODE_GX_SET_FBSIZE
-unsigned int gx_frame_buffer_size(void)
-{
- return CONFIG_FB_GEODE_GX_FBSIZE;
-}
-#else
unsigned int gx_frame_buffer_size(void)
{
unsigned int val;
- /* FB size is reported by a virtual register */
+ if (!geode_has_vsa2()) {
+ uint32_t hi, lo;
+
+ /* The number of pages is (PMAX - PMIN)+1 */
+ rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
+
+ /* PMAX */
+ val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
+ /* PMIN */
+ val -= (lo & 0x000fffff);
+ val += 1;
+
+ /* The page size is 4k */
+ return (val << 12);
+ }
+
+ /* FB size can be obtained from the VSA II */
/* Virtual register class = 0x02 */
/* VG_MEM_SIZE(512Kb units) = 0x00 */
- outw(0xFC53, 0xAC1C);
- outw(0x0200, 0xAC1C);
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
- val = (unsigned int)(inw(0xAC1E)) & 0xFFl;
+ val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFFl;
return (val << 19);
}
-#endif
int gx_line_delta(int xres, int bpp)
{
@@ -49,75 +58,76 @@ int gx_line_delta(int xres, int bpp)
return (xres * (bpp >> 3) + 7) & ~0x7;
}
-static void gx_set_mode(struct fb_info *info)
+void gx_set_mode(struct fb_info *info)
{
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
u32 gcfg, dcfg;
int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal;
int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
/* Unlock the display controller registers. */
- readl(par->dc_regs + DC_UNLOCK);
- writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
- gcfg = readl(par->dc_regs + DC_GENERAL_CFG);
- dcfg = readl(par->dc_regs + DC_DISPLAY_CFG);
+ gcfg = read_dc(par, DC_GENERAL_CFG);
+ dcfg = read_dc(par, DC_DISPLAY_CFG);
/* Disable the timing generator. */
- dcfg &= ~(DC_DCFG_TGEN);
- writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
+ dcfg &= ~DC_DISPLAY_CFG_TGEN;
+ write_dc(par, DC_DISPLAY_CFG, dcfg);
/* Wait for pending memory requests before disabling the FIFO load. */
udelay(100);
/* Disable FIFO load and compression. */
- gcfg &= ~(DC_GCFG_DFLE | DC_GCFG_CMPE | DC_GCFG_DECE);
- writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+ gcfg &= ~(DC_GENERAL_CFG_DFLE | DC_GENERAL_CFG_CMPE |
+ DC_GENERAL_CFG_DECE);
+ write_dc(par, DC_GENERAL_CFG, gcfg);
/* Setup DCLK and its divisor. */
- par->vid_ops->set_dclk(info);
+ gx_set_dclk_frequency(info);
/*
* Setup new mode.
*/
/* Clear all unused feature bits. */
- gcfg &= DC_GCFG_YUVM | DC_GCFG_VDSE;
+ gcfg &= DC_GENERAL_CFG_YUVM | DC_GENERAL_CFG_VDSE;
dcfg = 0;
/* Set FIFO priority (default 6/5) and enable. */
/* FIXME: increase fifo priority for 1280x1024 and higher modes? */
- gcfg |= (6 << DC_GCFG_DFHPEL_POS) | (5 << DC_GCFG_DFHPSL_POS) | DC_GCFG_DFLE;
+ gcfg |= (6 << DC_GENERAL_CFG_DFHPEL_SHIFT) |
+ (5 << DC_GENERAL_CFG_DFHPSL_SHIFT) | DC_GENERAL_CFG_DFLE;
/* Framebuffer start offset. */
- writel(0, par->dc_regs + DC_FB_ST_OFFSET);
+ write_dc(par, DC_FB_ST_OFFSET, 0);
/* Line delta and line buffer length. */
- writel(info->fix.line_length >> 3, par->dc_regs + DC_GFX_PITCH);
- writel(((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2,
- par->dc_regs + DC_LINE_SIZE);
+ write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
+ write_dc(par, DC_LINE_SIZE,
+ ((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2);
/* Enable graphics and video data and unmask address lines. */
- dcfg |= DC_DCFG_GDEN | DC_DCFG_VDEN | DC_DCFG_A20M | DC_DCFG_A18M;
+ dcfg |= DC_DISPLAY_CFG_GDEN | DC_DISPLAY_CFG_VDEN |
+ DC_DISPLAY_CFG_A20M | DC_DISPLAY_CFG_A18M;
/* Set pixel format. */
switch (info->var.bits_per_pixel) {
case 8:
- dcfg |= DC_DCFG_DISP_MODE_8BPP;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
break;
case 16:
- dcfg |= DC_DCFG_DISP_MODE_16BPP;
- dcfg |= DC_DCFG_16BPP_MODE_565;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
break;
case 32:
- dcfg |= DC_DCFG_DISP_MODE_24BPP;
- dcfg |= DC_DCFG_PALB;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
+ dcfg |= DC_DISPLAY_CFG_PALB;
break;
}
/* Enable timing generator. */
- dcfg |= DC_DCFG_TGEN;
+ dcfg |= DC_DISPLAY_CFG_TGEN;
/* Horizontal and vertical timings. */
hactive = info->var.xres;
@@ -134,28 +144,34 @@ static void gx_set_mode(struct fb_info *info)
vblankend = vsyncend + info->var.upper_margin;
vtotal = vblankend;
- writel((hactive - 1) | ((htotal - 1) << 16), par->dc_regs + DC_H_ACTIVE_TIMING);
- writel((hblankstart - 1) | ((hblankend - 1) << 16), par->dc_regs + DC_H_BLANK_TIMING);
- writel((hsyncstart - 1) | ((hsyncend - 1) << 16), par->dc_regs + DC_H_SYNC_TIMING);
+ write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1) |
+ ((htotal - 1) << 16));
+ write_dc(par, DC_H_BLANK_TIMING, (hblankstart - 1) |
+ ((hblankend - 1) << 16));
+ write_dc(par, DC_H_SYNC_TIMING, (hsyncstart - 1) |
+ ((hsyncend - 1) << 16));
- writel((vactive - 1) | ((vtotal - 1) << 16), par->dc_regs + DC_V_ACTIVE_TIMING);
- writel((vblankstart - 1) | ((vblankend - 1) << 16), par->dc_regs + DC_V_BLANK_TIMING);
- writel((vsyncstart - 1) | ((vsyncend - 1) << 16), par->dc_regs + DC_V_SYNC_TIMING);
+ write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1) |
+ ((vtotal - 1) << 16));
+ write_dc(par, DC_V_BLANK_TIMING, (vblankstart - 1) |
+ ((vblankend - 1) << 16));
+ write_dc(par, DC_V_SYNC_TIMING, (vsyncstart - 1) |
+ ((vsyncend - 1) << 16));
/* Write final register values. */
- writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
- writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+ write_dc(par, DC_DISPLAY_CFG, dcfg);
+ write_dc(par, DC_GENERAL_CFG, gcfg);
- par->vid_ops->configure_display(info);
+ gx_configure_display(info);
/* Relock display controller registers */
- writel(0, par->dc_regs + DC_UNLOCK);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
-static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
- unsigned red, unsigned green, unsigned blue)
+void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
+ unsigned red, unsigned green, unsigned blue)
{
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
int val;
/* Hardware palette is in RGB 8-8-8 format. */
@@ -163,11 +179,6 @@ static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
val |= (green) & 0x00ff00;
val |= (blue >> 8) & 0x0000ff;
- writel(regno, par->dc_regs + DC_PAL_ADDRESS);
- writel(val, par->dc_regs + DC_PAL_DATA);
+ write_dc(par, DC_PAL_ADDRESS, regno);
+ write_dc(par, DC_PAL_DATA, val);
}
-
-struct geode_dc_ops gx_dc_ops = {
- .set_mode = gx_set_mode,
- .set_palette_reg = gx_set_hw_palette_reg,
-};
diff --git a/drivers/video/geode/display_gx.h b/drivers/video/geode/display_gx.h
deleted file mode 100644
index 0af33f329e88..000000000000
--- a/drivers/video/geode/display_gx.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Geode GX display controller
- *
- * Copyright (C) 2006 Arcom Control Systems Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __DISPLAY_GX_H__
-#define __DISPLAY_GX_H__
-
-unsigned int gx_frame_buffer_size(void);
-int gx_line_delta(int xres, int bpp);
-
-extern struct geode_dc_ops gx_dc_ops;
-
-/* MSR that tells us if a TFT or CRT is attached */
-#define GLD_MSR_CONFIG 0xC0002001
-#define GLD_MSR_CONFIG_DM_FP 0x40
-
-/* Display controller registers */
-
-#define DC_UNLOCK 0x00
-# define DC_UNLOCK_CODE 0x00004758
-
-#define DC_GENERAL_CFG 0x04
-# define DC_GCFG_DFLE 0x00000001
-# define DC_GCFG_CURE 0x00000002
-# define DC_GCFG_ICNE 0x00000004
-# define DC_GCFG_VIDE 0x00000008
-# define DC_GCFG_CMPE 0x00000020
-# define DC_GCFG_DECE 0x00000040
-# define DC_GCFG_VGAE 0x00000080
-# define DC_GCFG_DFHPSL_MASK 0x00000F00
-# define DC_GCFG_DFHPSL_POS 8
-# define DC_GCFG_DFHPEL_MASK 0x0000F000
-# define DC_GCFG_DFHPEL_POS 12
-# define DC_GCFG_STFM 0x00010000
-# define DC_GCFG_FDTY 0x00020000
-# define DC_GCFG_VGAFT 0x00040000
-# define DC_GCFG_VDSE 0x00080000
-# define DC_GCFG_YUVM 0x00100000
-# define DC_GCFG_VFSL 0x00800000
-# define DC_GCFG_SIGE 0x01000000
-# define DC_GCFG_SGRE 0x02000000
-# define DC_GCFG_SGFR 0x04000000
-# define DC_GCFG_CRC_MODE 0x08000000
-# define DC_GCFG_DIAG 0x10000000
-# define DC_GCFG_CFRW 0x20000000
-
-#define DC_DISPLAY_CFG 0x08
-# define DC_DCFG_TGEN 0x00000001
-# define DC_DCFG_GDEN 0x00000008
-# define DC_DCFG_VDEN 0x00000010
-# define DC_DCFG_TRUP 0x00000040
-# define DC_DCFG_DISP_MODE_MASK 0x00000300
-# define DC_DCFG_DISP_MODE_8BPP 0x00000000
-# define DC_DCFG_DISP_MODE_16BPP 0x00000100
-# define DC_DCFG_DISP_MODE_24BPP 0x00000200
-# define DC_DCFG_16BPP_MODE_MASK 0x00000c00
-# define DC_DCFG_16BPP_MODE_565 0x00000000
-# define DC_DCFG_16BPP_MODE_555 0x00000100
-# define DC_DCFG_16BPP_MODE_444 0x00000200
-# define DC_DCFG_DCEN 0x00080000
-# define DC_DCFG_PALB 0x02000000
-# define DC_DCFG_FRLK 0x04000000
-# define DC_DCFG_VISL 0x08000000
-# define DC_DCFG_FRSL 0x20000000
-# define DC_DCFG_A18M 0x40000000
-# define DC_DCFG_A20M 0x80000000
-
-#define DC_FB_ST_OFFSET 0x10
-
-#define DC_LINE_SIZE 0x30
-# define DC_LINE_SIZE_FB_LINE_SIZE_MASK 0x000007ff
-# define DC_LINE_SIZE_FB_LINE_SIZE_POS 0
-# define DC_LINE_SIZE_CB_LINE_SIZE_MASK 0x007f0000
-# define DC_LINE_SIZE_CB_LINE_SIZE_POS 16
-# define DC_LINE_SIZE_VID_LINE_SIZE_MASK 0xff000000
-# define DC_LINE_SIZE_VID_LINE_SIZE_POS 24
-
-#define DC_GFX_PITCH 0x34
-# define DC_GFX_PITCH_FB_PITCH_MASK 0x0000ffff
-# define DC_GFX_PITCH_FB_PITCH_POS 0
-# define DC_GFX_PITCH_CB_PITCH_MASK 0xffff0000
-# define DC_GFX_PITCH_CB_PITCH_POS 16
-
-#define DC_H_ACTIVE_TIMING 0x40
-#define DC_H_BLANK_TIMING 0x44
-#define DC_H_SYNC_TIMING 0x48
-#define DC_V_ACTIVE_TIMING 0x50
-#define DC_V_BLANK_TIMING 0x54
-#define DC_V_SYNC_TIMING 0x58
-
-#define DC_PAL_ADDRESS 0x70
-#define DC_PAL_DATA 0x74
-
-#define DC_GLIU0_MEM_OFFSET 0x84
-#endif /* !__DISPLAY_GX1_H__ */
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h
new file mode 100644
index 000000000000..16a96f8fd8c5
--- /dev/null
+++ b/drivers/video/geode/gxfb.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ * Geode GX2 header information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _GXFB_H_
+#define _GXFB_H_
+
+#include <linux/io.h>
+
+#define GP_REG_COUNT (0x50 / 4)
+#define DC_REG_COUNT (0x90 / 4)
+#define VP_REG_COUNT (0x138 / 8)
+#define FP_REG_COUNT (0x68 / 8)
+
+#define DC_PAL_COUNT 0x104
+
+struct gxfb_par {
+ int enable_crt;
+ void __iomem *dc_regs;
+ void __iomem *vid_regs;
+ void __iomem *gp_regs;
+#ifdef CONFIG_PM
+ int powered_down;
+
+ /* register state, for power management functionality */
+ struct {
+ uint64_t padsel;
+ uint64_t dotpll;
+ } msr;
+
+ uint32_t gp[GP_REG_COUNT];
+ uint32_t dc[DC_REG_COUNT];
+ uint64_t vp[VP_REG_COUNT];
+ uint64_t fp[FP_REG_COUNT];
+
+ uint32_t pal[DC_PAL_COUNT];
+#endif
+};
+
+unsigned int gx_frame_buffer_size(void);
+int gx_line_delta(int xres, int bpp);
+void gx_set_mode(struct fb_info *info);
+void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
+ unsigned red, unsigned green, unsigned blue);
+
+void gx_set_dclk_frequency(struct fb_info *info);
+void gx_configure_display(struct fb_info *info);
+int gx_blank_display(struct fb_info *info, int blank_mode);
+
+#ifdef CONFIG_PM
+int gx_powerdown(struct fb_info *info);
+int gx_powerup(struct fb_info *info);
+#endif
+
+
+/* Graphics Processor registers (table 6-23 from the data book) */
+enum gp_registers {
+ GP_DST_OFFSET = 0,
+ GP_SRC_OFFSET,
+ GP_STRIDE,
+ GP_WID_HEIGHT,
+
+ GP_SRC_COLOR_FG,
+ GP_SRC_COLOR_BG,
+ GP_PAT_COLOR_0,
+ GP_PAT_COLOR_1,
+
+ GP_PAT_COLOR_2,
+ GP_PAT_COLOR_3,
+ GP_PAT_COLOR_4,
+ GP_PAT_COLOR_5,
+
+ GP_PAT_DATA_0,
+ GP_PAT_DATA_1,
+ GP_RASTER_MODE,
+ GP_VECTOR_MODE,
+
+ GP_BLT_MODE,
+ GP_BLT_STATUS,
+ GP_HST_SRC,
+ GP_BASE_OFFSET, /* 0x4c */
+};
+
+#define GP_BLT_STATUS_BLT_PENDING (1 << 2)
+#define GP_BLT_STATUS_BLT_BUSY (1 << 0)
+
+
+/* Display Controller registers (table 6-38 from the data book) */
+enum dc_registers {
+ DC_UNLOCK = 0,
+ DC_GENERAL_CFG,
+ DC_DISPLAY_CFG,
+ DC_RSVD_0,
+
+ DC_FB_ST_OFFSET,
+ DC_CB_ST_OFFSET,
+ DC_CURS_ST_OFFSET,
+ DC_ICON_ST_OFFSET,
+
+ DC_VID_Y_ST_OFFSET,
+ DC_VID_U_ST_OFFSET,
+ DC_VID_V_ST_OFFSET,
+ DC_RSVD_1,
+
+ DC_LINE_SIZE,
+ DC_GFX_PITCH,
+ DC_VID_YUV_PITCH,
+ DC_RSVD_2,
+
+ DC_H_ACTIVE_TIMING,
+ DC_H_BLANK_TIMING,
+ DC_H_SYNC_TIMING,
+ DC_RSVD_3,
+
+ DC_V_ACTIVE_TIMING,
+ DC_V_BLANK_TIMING,
+ DC_V_SYNC_TIMING,
+ DC_RSVD_4,
+
+ DC_CURSOR_X,
+ DC_CURSOR_Y,
+ DC_ICON_X,
+ DC_LINE_CNT,
+
+ DC_PAL_ADDRESS,
+ DC_PAL_DATA,
+ DC_DFIFO_DIAG,
+ DC_CFIFO_DIAG,
+
+ DC_VID_DS_DELTA,
+ DC_GLIU0_MEM_OFFSET,
+ DC_RSVD_5,
+ DC_DV_ACC, /* 0x8c */
+};
+
+#define DC_UNLOCK_LOCK 0x00000000
+#define DC_UNLOCK_UNLOCK 0x00004758 /* magic value */
+
+#define DC_GENERAL_CFG_YUVM (1 << 20)
+#define DC_GENERAL_CFG_VDSE (1 << 19)
+#define DC_GENERAL_CFG_DFHPEL_SHIFT 12
+#define DC_GENERAL_CFG_DFHPSL_SHIFT 8
+#define DC_GENERAL_CFG_DECE (1 << 6)
+#define DC_GENERAL_CFG_CMPE (1 << 5)
+#define DC_GENERAL_CFG_VIDE (1 << 3)
+#define DC_GENERAL_CFG_ICNE (1 << 2)
+#define DC_GENERAL_CFG_CURE (1 << 1)
+#define DC_GENERAL_CFG_DFLE (1 << 0)
+
+#define DC_DISPLAY_CFG_A20M (1 << 31)
+#define DC_DISPLAY_CFG_A18M (1 << 30)
+#define DC_DISPLAY_CFG_PALB (1 << 25)
+#define DC_DISPLAY_CFG_DISP_MODE_24BPP (1 << 9)
+#define DC_DISPLAY_CFG_DISP_MODE_16BPP (1 << 8)
+#define DC_DISPLAY_CFG_DISP_MODE_8BPP (0)
+#define DC_DISPLAY_CFG_VDEN (1 << 4)
+#define DC_DISPLAY_CFG_GDEN (1 << 3)
+#define DC_DISPLAY_CFG_TGEN (1 << 0)
+
+
+/*
+ * Video Processor registers (table 6-54).
+ * There is space for 64 bit values, but we never use more than the
+ * lower 32 bits. The actual register save/restore code only bothers
+ * to restore those 32 bits.
+ */
+enum vp_registers {
+ VP_VCFG = 0,
+ VP_DCFG,
+
+ VP_VX,
+ VP_VY,
+
+ VP_VS,
+ VP_VCK,
+
+ VP_VCM,
+ VP_GAR,
+
+ VP_GDR,
+ VP_RSVD_0,
+
+ VP_MISC,
+ VP_CCS,
+
+ VP_RSVD_1,
+ VP_RSVD_2,
+
+ VP_RSVD_3,
+ VP_VDC,
+
+ VP_VCO,
+ VP_CRC,
+
+ VP_CRC32,
+ VP_VDE,
+
+ VP_CCK,
+ VP_CCM,
+
+ VP_CC1,
+ VP_CC2,
+
+ VP_A1X,
+ VP_A1Y,
+
+ VP_A1C,
+ VP_A1T,
+
+ VP_A2X,
+ VP_A2Y,
+
+ VP_A2C,
+ VP_A2T,
+
+ VP_A3X,
+ VP_A3Y,
+
+ VP_A3C,
+ VP_A3T,
+
+ VP_VRR,
+ VP_AWT,
+
+ VP_VTM, /* 0x130 */
+};
+
+#define VP_VCFG_VID_EN (1 << 0)
+
+#define VP_DCFG_DAC_VREF (1 << 26)
+#define VP_DCFG_GV_GAM (1 << 21)
+#define VP_DCFG_VG_CK (1 << 20)
+#define VP_DCFG_CRT_SYNC_SKW_DEFAULT (1 << 16)
+#define VP_DCFG_CRT_SYNC_SKW ((1 << 14) | (1 << 15) | (1 << 16))
+#define VP_DCFG_CRT_VSYNC_POL (1 << 9)
+#define VP_DCFG_CRT_HSYNC_POL (1 << 8)
+#define VP_DCFG_FP_DATA_EN (1 << 7) /* undocumented */
+#define VP_DCFG_FP_PWR_EN (1 << 6) /* undocumented */
+#define VP_DCFG_DAC_BL_EN (1 << 3)
+#define VP_DCFG_VSYNC_EN (1 << 2)
+#define VP_DCFG_HSYNC_EN (1 << 1)
+#define VP_DCFG_CRT_EN (1 << 0)
+
+#define VP_MISC_GAM_EN (1 << 0)
+#define VP_MISC_DACPWRDN (1 << 10)
+#define VP_MISC_APWRDN (1 << 11)
+
+
+/*
+ * Flat Panel registers (table 6-55).
+ * Also 64 bit registers; see above note about 32-bit handling.
+ */
+
+/* we're actually in the VP register space, starting at address 0x400 */
+#define VP_FP_START 0x400
+
+enum fp_registers {
+ FP_PT1 = 0,
+ FP_PT2,
+
+ FP_PM,
+ FP_DFC,
+
+ FP_BLFSR,
+ FP_RLFSR,
+
+ FP_FMI,
+ FP_FMD,
+
+ FP_RSVD_0,
+ FP_DCA,
+
+ FP_DMD,
+ FP_CRC,
+
+ FP_FBB, /* 0x460 */
+};
+
+#define FP_PT1_VSIZE_SHIFT 16 /* undocumented? */
+#define FP_PT1_VSIZE_MASK 0x7FF0000 /* undocumented? */
+
+#define FP_PT2_HSP (1 << 22)
+#define FP_PT2_VSP (1 << 23)
+
+#define FP_PM_P (1 << 24) /* panel power on */
+#define FP_PM_PANEL_PWR_UP (1 << 3) /* r/o */
+#define FP_PM_PANEL_PWR_DOWN (1 << 2) /* r/o */
+#define FP_PM_PANEL_OFF (1 << 1) /* r/o */
+#define FP_PM_PANEL_ON (1 << 0) /* r/o */
+
+#define FP_DFC_NFI ((1 << 4) | (1 << 5) | (1 << 6))
+
+
+/* register access functions */
+
+static inline uint32_t read_gp(struct gxfb_par *par, int reg)
+{
+ return readl(par->gp_regs + 4*reg);
+}
+
+static inline void write_gp(struct gxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->gp_regs + 4*reg);
+}
+
+static inline uint32_t read_dc(struct gxfb_par *par, int reg)
+{
+ return readl(par->dc_regs + 4*reg);
+}
+
+static inline void write_dc(struct gxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->dc_regs + 4*reg);
+}
+
+static inline uint32_t read_vp(struct gxfb_par *par, int reg)
+{
+ return readl(par->vid_regs + 8*reg);
+}
+
+static inline void write_vp(struct gxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->vid_regs + 8*reg);
+}
+
+static inline uint32_t read_fp(struct gxfb_par *par, int reg)
+{
+ return readl(par->vid_regs + 8*reg + VP_FP_START);
+}
+
+static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->vid_regs + 8*reg + VP_FP_START);
+}
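+
+/*
+ * A note on the accessors above: GP and DC registers are 32 bits wide on
+ * 4-byte boundaries, so the helpers index with 4*reg; VP and FP registers
+ * sit in 8-byte slots (only the low 32 bits are used), hence 8*reg. A
+ * minimal sketch of the resulting read-modify-write idiom, assuming a par
+ * whose register files are already mapped; note that the DC register file
+ * must be unlocked before it accepts writes:
+ *
+ *	static void example_enable_tgen(struct gxfb_par *par)
+ *	{
+ *		uint32_t dcfg;
+ *
+ *		write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+ *		dcfg = read_dc(par, DC_DISPLAY_CFG);
+ *		dcfg |= DC_DISPLAY_CFG_TGEN;	// timing generator on
+ *		write_dc(par, DC_DISPLAY_CFG, dcfg);
+ *		write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+ *	}
+ */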
+
+
+/* MSRs are defined in asm/geode.h; their bitfields are here */
+
+#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3)
+#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2)
+#define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 (1 << 1)
+
+#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */
+#define MSR_GLCP_DOTPLL_BYPASS (1 << 15)
+#define MSR_GLCP_DOTPLL_DOTRESET (1 << 0)
+
+#define MSR_GX_MSR_PADSEL_MASK 0x3FFFFFFF /* undocumented? */
+#define MSR_GX_MSR_PADSEL_TFT 0x1FFFFFFF /* undocumented? */
+
+#define MSR_GX_GLD_MSR_CONFIG_FP (1 << 3)
+
+#endif
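
[editor note] The gp/dc/vp/fp arrays in struct gxfb_par are sized by the
*_REG_COUNT constants above so that a suspend pass can snapshot each register
file wholesale. The gxfb save path itself is not shown in this hunk; as a
hedged sketch, it plausibly follows the lx_save_regs() pattern that appears
later in this patch:

	/* Illustrative sketch only, modeled on lx_save_regs() below. */
	static void example_gx_save_regs(struct gxfb_par *par)
	{
		int i;

		write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);

		/* snapshot each register file into the gxfb_par arrays */
		memcpy(par->gp, par->gp_regs, sizeof(par->gp));
		memcpy(par->dc, par->dc_regs, sizeof(par->dc));
		memcpy(par->vp, par->vid_regs, sizeof(par->vp));
		memcpy(par->fp, par->vid_regs + VP_FP_START, sizeof(par->fp));

		/* the palette reads back through the indexed data port */
		write_dc(par, DC_PAL_ADDRESS, 0);
		for (i = 0; i < ARRAY_SIZE(par->pal); i++)
			par->pal[i] = read_dc(par, DC_PAL_DATA);
	}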
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index cf841efa229a..de2b8f9876a5 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -28,17 +28,20 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/suspend.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <asm/geode.h>
-#include "geodefb.h"
-#include "display_gx.h"
-#include "video_gx.h"
+#include "gxfb.h"
static char *mode_option;
+static int vram;
+static int vt_switch;
/* Modes relevant to the GX (taken from modedb.c) */
-static const struct fb_videomode gx_modedb[] __initdata = {
+static struct fb_videomode gx_modedb[] __initdata = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -105,6 +108,35 @@ static const struct fb_videomode gx_modedb[] __initdata = {
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
+#ifdef CONFIG_OLPC
+#include <asm/olpc.h>
+
+static struct fb_videomode gx_dcon_modedb[] __initdata = {
+ /* The only mode the DCON has is 1200x900 */
+ { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED, 0 }
+};
+
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+ if (olpc_has_dcon()) {
+ *modedb = (struct fb_videomode *) gx_dcon_modedb;
+ *size = ARRAY_SIZE(gx_dcon_modedb);
+ } else {
+ *modedb = (struct fb_videomode *) gx_modedb;
+ *size = ARRAY_SIZE(gx_modedb);
+ }
+}
+
+#else
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+ *modedb = (struct fb_videomode *) gx_modedb;
+ *size = ARRAY_SIZE(gx_modedb);
+}
+#endif
+
static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
if (var->xres > 1600 || var->yres > 1200)
@@ -139,8 +171,6 @@ static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int gxfb_set_par(struct fb_info *info)
{
- struct geodefb_par *par = info->par;
-
if (info->var.bits_per_pixel > 8) {
info->fix.visual = FB_VISUAL_TRUECOLOR;
fb_dealloc_cmap(&info->cmap);
@@ -151,7 +181,7 @@ static int gxfb_set_par(struct fb_info *info)
info->fix.line_length = gx_line_delta(info->var.xres, info->var.bits_per_pixel);
- par->dc_ops->set_mode(info);
+ gx_set_mode(info);
return 0;
}
@@ -167,8 +197,6 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct geodefb_par *par = info->par;
-
if (info->var.grayscale) {
/* grayscale = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
@@ -191,7 +219,7 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= 256)
return -EINVAL;
- par->dc_ops->set_palette_reg(info, regno, red, green, blue);
+ gx_set_hw_palette_reg(info, regno, red, green, blue);
}
return 0;
@@ -199,15 +227,12 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int gxfb_blank(int blank_mode, struct fb_info *info)
{
- struct geodefb_par *par = info->par;
-
- return par->vid_ops->blank_display(info, blank_mode);
+ return gx_blank_display(info, blank_mode);
}
static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
- struct geodefb_par *par = info->par;
- int fb_len;
+ struct gxfb_par *par = info->par;
int ret;
ret = pci_enable_device(dev);
@@ -229,24 +254,31 @@ static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *de
if (!par->dc_regs)
return -ENOMEM;
- ret = pci_request_region(dev, 0, "gxfb (framebuffer)");
+ ret = pci_request_region(dev, 1, "gxfb (graphics processor)");
if (ret < 0)
return ret;
- if ((fb_len = gx_frame_buffer_size()) < 0)
+ par->gp_regs = ioremap(pci_resource_start(dev, 1),
+ pci_resource_len(dev, 1));
+
+ if (!par->gp_regs)
return -ENOMEM;
+
+ ret = pci_request_region(dev, 0, "gxfb (framebuffer)");
+ if (ret < 0)
+ return ret;
+
info->fix.smem_start = pci_resource_start(dev, 0);
- info->fix.smem_len = fb_len;
+ info->fix.smem_len = vram ? vram : gx_frame_buffer_size();
info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
if (!info->screen_base)
return -ENOMEM;
- /* Set the 16MB aligned base address of the graphics memory region
+ /* Set the 16MiB aligned base address of the graphics memory region
* in the display controller */
- writel(info->fix.smem_start & 0xFF000000,
- par->dc_regs + DC_GLIU0_MEM_OFFSET);
+ write_dc(par, DC_GLIU0_MEM_OFFSET, info->fix.smem_start & 0xFF000000);
- dev_info(&dev->dev, "%d Kibyte of video memory at 0x%lx\n",
+ dev_info(&dev->dev, "%d KiB of video memory at 0x%lx\n",
info->fix.smem_len / 1024, info->fix.smem_start);
return 0;
@@ -266,11 +298,12 @@ static struct fb_ops gxfb_ops = {
static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
{
- struct geodefb_par *par;
+ struct gxfb_par *par;
struct fb_info *info;
/* Alloc enough space for the pseudo palette. */
- info = framebuffer_alloc(sizeof(struct geodefb_par) + sizeof(u32) * 16, dev);
+ info = framebuffer_alloc(sizeof(struct gxfb_par) + sizeof(u32) * 16,
+ dev);
if (!info)
return NULL;
@@ -296,29 +329,64 @@ static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
info->flags = FBINFO_DEFAULT;
info->node = -1;
- info->pseudo_palette = (void *)par + sizeof(struct geodefb_par);
+ info->pseudo_palette = (void *)par + sizeof(struct gxfb_par);
info->var.grayscale = 0;
return info;
}
+#ifdef CONFIG_PM
+static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
+
+ if (state.event == PM_EVENT_SUSPEND) {
+ acquire_console_sem();
+ gx_powerdown(info);
+ fb_set_suspend(info, 1);
+ release_console_sem();
+ }
+
+ /* there's no point in setting PCI states; we emulate PCI, so
+ * we don't end up getting power savings anyway */
+
+ return 0;
+}
+
+static int gxfb_resume(struct pci_dev *pdev)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
+ int ret;
+
+ acquire_console_sem();
+ ret = gx_powerup(info);
+ if (ret) {
+ printk(KERN_ERR "gxfb: power up failed!\n");
+ return ret;
+ }
+
+ fb_set_suspend(info, 0);
+ release_console_sem();
+ return 0;
+}
+#endif
+
static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct geodefb_par *par;
+ struct gxfb_par *par;
struct fb_info *info;
int ret;
unsigned long val;
+ struct fb_videomode *modedb_ptr;
+ unsigned int modedb_size;
+
info = gxfb_init_fbinfo(&pdev->dev);
if (!info)
return -ENOMEM;
par = info->par;
- /* GX display controller and GX video device. */
- par->dc_ops = &gx_dc_ops;
- par->vid_ops = &gx_vid_ops;
-
if ((ret = gxfb_map_video_memory(info, pdev)) < 0) {
dev_err(&pdev->dev, "failed to map frame buffer or controller registers\n");
goto err;
@@ -326,15 +394,16 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
/* Figure out if this is a TFT or CRT part */
- rdmsrl(GLD_MSR_CONFIG, val);
+ rdmsrl(MSR_GX_GLD_MSR_CONFIG, val);
- if ((val & GLD_MSR_CONFIG_DM_FP) == GLD_MSR_CONFIG_DM_FP)
+ if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP)
par->enable_crt = 0;
else
par->enable_crt = 1;
+ get_modedb(&modedb_ptr, &modedb_size);
ret = fb_find_mode(&info->var, info, mode_option,
- gx_modedb, ARRAY_SIZE(gx_modedb), NULL, 16);
+ modedb_ptr, modedb_size, NULL, 16);
if (ret == 0 || ret == 4) {
dev_err(&pdev->dev, "could not find valid video mode\n");
ret = -EINVAL;
@@ -348,6 +417,8 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
gxfb_check_var(&info->var, info);
gxfb_set_par(info);
+ pm_set_vt_switch(vt_switch);
+
if (register_framebuffer(info) < 0) {
ret = -EINVAL;
goto err;
@@ -369,6 +440,10 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
iounmap(par->dc_regs);
pci_release_region(pdev, 2);
}
+ if (par->gp_regs) {
+ iounmap(par->gp_regs);
+ pci_release_region(pdev, 1);
+ }
if (info)
framebuffer_release(info);
@@ -378,7 +453,7 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
static void gxfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
unregister_framebuffer(info);
@@ -391,15 +466,16 @@ static void gxfb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
pci_release_region(pdev, 2);
+ iounmap(par->gp_regs);
+ pci_release_region(pdev, 1);
+
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
static struct pci_device_id gxfb_id_table[] = {
- { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO,
- PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
- 0xff0000, 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO) },
{ 0, }
};
@@ -410,6 +486,10 @@ static struct pci_driver gxfb_driver = {
.id_table = gxfb_id_table,
.probe = gxfb_probe,
.remove = gxfb_remove,
+#ifdef CONFIG_PM
+ .suspend = gxfb_suspend,
+ .resume = gxfb_resume,
+#endif
};
#ifndef MODULE
@@ -456,5 +536,11 @@ module_exit(gxfb_cleanup);
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "video mode (<x>x<y>[-<bpp>][@<refr>])");
+module_param(vram, int, 0);
+MODULE_PARM_DESC(vram, "video memory size");
+
+module_param(vt_switch, int, 0);
+MODULE_PARM_DESC(vt_switch, "enable VT switch during suspend/resume");
+
MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode GX");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index ca13c48d19b0..3b9416f4ee20 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -3,17 +3,46 @@
#include <linux/fb.h>
+#define GP_REG_COUNT (0x7c / 4)
+#define DC_REG_COUNT (0xf0 / 4)
+#define VP_REG_COUNT (0x158 / 8)
+#define FP_REG_COUNT (0x60 / 8)
+
+#define DC_PAL_COUNT 0x104
+#define DC_HFILT_COUNT 0x100
+#define DC_VFILT_COUNT 0x100
+#define VP_COEFF_SIZE 0x1000
+
#define OUTPUT_CRT 0x01
#define OUTPUT_PANEL 0x02
struct lxfb_par {
int output;
- int panel_width;
- int panel_height;
void __iomem *gp_regs;
void __iomem *dc_regs;
- void __iomem *df_regs;
+ void __iomem *vp_regs;
+#ifdef CONFIG_PM
+ int powered_down;
+
+ /* register state, for power mgmt functionality */
+ struct {
+ uint64_t padsel;
+ uint64_t dotpll;
+ uint64_t dfglcfg;
+ uint64_t dcspare;
+ } msr;
+
+ uint32_t gp[GP_REG_COUNT];
+ uint32_t dc[DC_REG_COUNT];
+ uint64_t vp[VP_REG_COUNT];
+ uint64_t fp[FP_REG_COUNT];
+
+ uint32_t pal[DC_PAL_COUNT];
+ uint32_t hcoeff[DC_HFILT_COUNT * 2];
+ uint32_t vcoeff[DC_VFILT_COUNT];
+ uint32_t vp_coeff[VP_COEFF_SIZE / 4];
+#endif
};
static inline unsigned int lx_get_pitch(unsigned int xres, int bpp)
@@ -29,171 +58,383 @@ int lx_blank_display(struct fb_info *, int);
void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
unsigned int, unsigned int);
-/* MSRS */
+#ifdef CONFIG_PM
+int lx_powerdown(struct fb_info *info);
+int lx_powerup(struct fb_info *info);
+#endif
+
+
+/* Graphics Processor registers (table 6-29 from the data book) */
+enum gp_registers {
+ GP_DST_OFFSET = 0,
+ GP_SRC_OFFSET,
+ GP_STRIDE,
+ GP_WID_HEIGHT,
+
+ GP_SRC_COLOR_FG,
+ GP_SRC_COLOR_BG,
+ GP_PAT_COLOR_0,
+ GP_PAT_COLOR_1,
+
+ GP_PAT_COLOR_2,
+ GP_PAT_COLOR_3,
+ GP_PAT_COLOR_4,
+ GP_PAT_COLOR_5,
+
+ GP_PAT_DATA_0,
+ GP_PAT_DATA_1,
+ GP_RASTER_MODE,
+ GP_VECTOR_MODE,
+
+ GP_BLT_MODE,
+ GP_BLT_STATUS,
+ GP_HST_SRC,
+ GP_BASE_OFFSET,
+
+ GP_CMD_TOP,
+ GP_CMD_BOT,
+ GP_CMD_READ,
+ GP_CMD_WRITE,
+
+ GP_CH3_OFFSET,
+ GP_CH3_MODE_STR,
+ GP_CH3_WIDHI,
+ GP_CH3_HSRC,
+
+ GP_LUT_INDEX,
+ GP_LUT_DATA,
+ GP_INT_CNTRL, /* 0x78 */
+};
+
+#define GP_BLT_STATUS_CE (1 << 4) /* cmd buf empty */
+#define GP_BLT_STATUS_PB (1 << 0) /* primitive busy */
+
+
+/* Display Controller registers (table 6-47 from the data book) */
+enum dc_registers {
+ DC_UNLOCK = 0,
+ DC_GENERAL_CFG,
+ DC_DISPLAY_CFG,
+ DC_ARB_CFG,
+
+ DC_FB_ST_OFFSET,
+ DC_CB_ST_OFFSET,
+ DC_CURS_ST_OFFSET,
+ DC_RSVD_0,
+
+ DC_VID_Y_ST_OFFSET,
+ DC_VID_U_ST_OFFSET,
+ DC_VID_V_ST_OFFSET,
+ DC_DV_TOP,
+
+ DC_LINE_SIZE,
+ DC_GFX_PITCH,
+ DC_VID_YUV_PITCH,
+ DC_RSVD_1,
+
+ DC_H_ACTIVE_TIMING,
+ DC_H_BLANK_TIMING,
+ DC_H_SYNC_TIMING,
+ DC_RSVD_2,
+
+ DC_V_ACTIVE_TIMING,
+ DC_V_BLANK_TIMING,
+ DC_V_SYNC_TIMING,
+ DC_FB_ACTIVE,
+
+ DC_CURSOR_X,
+ DC_CURSOR_Y,
+ DC_RSVD_3,
+ DC_LINE_CNT,
+
+ DC_PAL_ADDRESS,
+ DC_PAL_DATA,
+ DC_DFIFO_DIAG,
+ DC_CFIFO_DIAG,
+
+ DC_VID_DS_DELTA,
+ DC_GLIU0_MEM_OFFSET,
+ DC_DV_CTL,
+ DC_DV_ACCESS,
+
+ DC_GFX_SCALE,
+ DC_IRQ_FILT_CTL,
+ DC_FILT_COEFF1,
+ DC_FILT_COEFF2,
+
+ DC_VBI_EVEN_CTL,
+ DC_VBI_ODD_CTL,
+ DC_VBI_HOR,
+ DC_VBI_LN_ODD,
+
+ DC_VBI_LN_EVEN,
+ DC_VBI_PITCH,
+ DC_CLR_KEY,
+ DC_CLR_KEY_MASK,
+
+ DC_CLR_KEY_X,
+ DC_CLR_KEY_Y,
+ DC_IRQ,
+ DC_RSVD_4,
+
+ DC_RSVD_5,
+ DC_GENLK_CTL,
+ DC_VID_EVEN_Y_ST_OFFSET,
+ DC_VID_EVEN_U_ST_OFFSET,
+
+ DC_VID_EVEN_V_ST_OFFSET,
+ DC_V_ACTIVE_EVEN_TIMING,
+ DC_V_BLANK_EVEN_TIMING,
+ DC_V_SYNC_EVEN_TIMING, /* 0xec */
+};
+
+#define DC_UNLOCK_LOCK 0x00000000
+#define DC_UNLOCK_UNLOCK 0x00004758 /* magic value */
+
+#define DC_GENERAL_CFG_FDTY (1 << 17)
+#define DC_GENERAL_CFG_DFHPEL_SHIFT (12)
+#define DC_GENERAL_CFG_DFHPSL_SHIFT (8)
+#define DC_GENERAL_CFG_VGAE (1 << 7)
+#define DC_GENERAL_CFG_DECE (1 << 6)
+#define DC_GENERAL_CFG_CMPE (1 << 5)
+#define DC_GENERAL_CFG_VIDE (1 << 3)
+#define DC_GENERAL_CFG_DFLE (1 << 0)
+
+#define DC_DISPLAY_CFG_VISL (1 << 27)
+#define DC_DISPLAY_CFG_PALB (1 << 25)
+#define DC_DISPLAY_CFG_DCEN (1 << 24)
+#define DC_DISPLAY_CFG_DISP_MODE_24BPP (1 << 9)
+#define DC_DISPLAY_CFG_DISP_MODE_16BPP (1 << 8)
+#define DC_DISPLAY_CFG_DISP_MODE_8BPP (0)
+#define DC_DISPLAY_CFG_TRUP (1 << 6)
+#define DC_DISPLAY_CFG_VDEN (1 << 4)
+#define DC_DISPLAY_CFG_GDEN (1 << 3)
+#define DC_DISPLAY_CFG_TGEN (1 << 0)
+
+#define DC_DV_TOP_DV_TOP_EN (1 << 0)
+
+#define DC_DV_CTL_DV_LINE_SIZE ((1 << 10) | (1 << 11))
+#define DC_DV_CTL_DV_LINE_SIZE_1K (0)
+#define DC_DV_CTL_DV_LINE_SIZE_2K (1 << 10)
+#define DC_DV_CTL_DV_LINE_SIZE_4K (1 << 11)
+#define DC_DV_CTL_DV_LINE_SIZE_8K ((1 << 10) | (1 << 11))
+#define DC_DV_CTL_CLEAR_DV_RAM (1 << 0)
+
+#define DC_IRQ_FILT_CTL_H_FILT_SEL (1 << 10)
+
+#define DC_CLR_KEY_CLR_KEY_EN (1 << 24)
+
+#define DC_IRQ_VIP_VSYNC_IRQ_STATUS (1 << 21) /* undocumented? */
+#define DC_IRQ_STATUS (1 << 20) /* undocumented? */
+#define DC_IRQ_VIP_VSYNC_LOSS_IRQ_MASK (1 << 1)
+#define DC_IRQ_MASK (1 << 0)
-#define MSR_LX_GLD_CONFIG 0x48002001
-#define MSR_LX_GLCP_DOTPLL 0x4c000015
-#define MSR_LX_DF_PADSEL 0x48002011
-#define MSR_LX_DC_SPARE 0x80000011
-#define MSR_LX_DF_GLCONFIG 0x48002001
-
-#define MSR_LX_GLIU0_P2D_RO0 0x10000029
-
-#define GLCP_DOTPLL_RESET (1 << 0)
-#define GLCP_DOTPLL_BYPASS (1 << 15)
-#define GLCP_DOTPLL_HALFPIX (1 << 24)
-#define GLCP_DOTPLL_LOCK (1 << 25)
-
-#define DF_CONFIG_OUTPUT_MASK 0x38
-#define DF_OUTPUT_PANEL 0x08
-#define DF_OUTPUT_CRT 0x00
-#define DF_SIMULTANEOUS_CRT_AND_FP (1 << 15)
-
-#define DF_DEFAULT_TFT_PAD_SEL_LOW 0xDFFFFFFF
-#define DF_DEFAULT_TFT_PAD_SEL_HIGH 0x0000003F
-
-#define DC_SPARE_DISABLE_CFIFO_HGO 0x00000800
-#define DC_SPARE_VFIFO_ARB_SELECT 0x00000400
-#define DC_SPARE_WM_LPEN_OVRD 0x00000200
-#define DC_SPARE_LOAD_WM_LPEN_MASK 0x00000100
-#define DC_SPARE_DISABLE_INIT_VID_PRI 0x00000080
-#define DC_SPARE_DISABLE_VFIFO_WM 0x00000040
-#define DC_SPARE_DISABLE_CWD_CHECK 0x00000020
-#define DC_SPARE_PIX8_PAN_FIX 0x00000010
-#define DC_SPARE_FIRST_REQ_MASK 0x00000002
-
-/* Registers */
-
-#define DC_UNLOCK 0x00
-#define DC_UNLOCK_CODE 0x4758
+#define DC_GENLK_CTL_FLICK_SEL_MASK (0x0F << 28)
+#define DC_GENLK_CTL_ALPHA_FLICK_EN (1 << 25)
+#define DC_GENLK_CTL_FLICK_EN (1 << 24)
+#define DC_GENLK_CTL_GENLK_EN (1 << 18)
-#define DC_GENERAL_CFG 0x04
-#define DC_GCFG_DFLE (1 << 0)
-#define DC_GCFG_VIDE (1 << 3)
-#define DC_GCFG_VGAE (1 << 7)
-#define DC_GCFG_CMPE (1 << 5)
-#define DC_GCFG_DECE (1 << 6)
-#define DC_GCFG_FDTY (1 << 17)
-#define DC_DISPLAY_CFG 0x08
-#define DC_DCFG_TGEN (1 << 0)
-#define DC_DCFG_GDEN (1 << 3)
-#define DC_DCFG_VDEN (1 << 4)
-#define DC_DCFG_TRUP (1 << 6)
-#define DC_DCFG_DCEN (1 << 24)
-#define DC_DCFG_PALB (1 << 25)
-#define DC_DCFG_VISL (1 << 27)
+/*
+ * Video Processor registers (table 6-71).
+ * There is space for 64 bit values, but we never use more than the
+ * lower 32 bits. The actual register save/restore code only bothers
+ * to restore those 32 bits.
+ */
+enum vp_registers {
+ VP_VCFG = 0,
+ VP_DCFG,
-#define DC_DCFG_16BPP 0x0
+ VP_VX,
+ VP_VY,
-#define DC_DCFG_DISP_MODE_MASK 0x00000300
-#define DC_DCFG_DISP_MODE_8BPP 0x00000000
-#define DC_DCFG_DISP_MODE_16BPP 0x00000100
-#define DC_DCFG_DISP_MODE_24BPP 0x00000200
-#define DC_DCFG_DISP_MODE_32BPP 0x00000300
+ VP_SCL,
+ VP_VCK,
+ VP_VCM,
+ VP_PAR,
-#define DC_ARB_CFG 0x0C
+ VP_PDR,
+ VP_SLR,
-#define DC_FB_START 0x10
-#define DC_CB_START 0x14
-#define DC_CURSOR_START 0x18
+ VP_MISC,
+ VP_CCS,
-#define DC_DV_TOP 0x2C
-#define DC_DV_TOP_ENABLE (1 << 0)
+ VP_VYS,
+ VP_VXS,
-#define DC_LINE_SIZE 0x30
-#define DC_GRAPHICS_PITCH 0x34
-#define DC_H_ACTIVE_TIMING 0x40
-#define DC_H_BLANK_TIMING 0x44
-#define DC_H_SYNC_TIMING 0x48
-#define DC_V_ACTIVE_TIMING 0x50
-#define DC_V_BLANK_TIMING 0x54
-#define DC_V_SYNC_TIMING 0x58
-#define DC_FB_ACTIVE 0x5C
+ VP_RSVD_0,
+ VP_VDC,
+
+ VP_RSVD_1,
+ VP_CRC,
+
+ VP_CRC32,
+ VP_VDE,
+
+ VP_CCK,
+ VP_CCM,
+
+ VP_CC1,
+ VP_CC2,
+
+ VP_A1X,
+ VP_A1Y,
+
+ VP_A1C,
+ VP_A1T,
+
+ VP_A2X,
+ VP_A2Y,
+
+ VP_A2C,
+ VP_A2T,
+
+ VP_A3X,
+ VP_A3Y,
+
+ VP_A3C,
+ VP_A3T,
+
+ VP_VRR,
+ VP_AWT,
+
+ VP_VTM,
+ VP_VYE,
+
+ VP_A1YE,
+ VP_A2YE,
+
+ VP_A3YE, /* 0x150 */
+
+ VP_VCR = 0x1000, /* 0x1000 - 0x1fff */
+};
-#define DC_PAL_ADDRESS 0x70
-#define DC_PAL_DATA 0x74
+#define VP_VCFG_VID_EN (1 << 0)
-#define DC_PHY_MEM_OFFSET 0x84
+#define VP_DCFG_GV_GAM (1 << 21)
+#define VP_DCFG_PWR_SEQ_DELAY ((1 << 17) | (1 << 18) | (1 << 19))
+#define VP_DCFG_PWR_SEQ_DELAY_DEFAULT (1 << 19) /* undocumented */
+#define VP_DCFG_CRT_SYNC_SKW ((1 << 14) | (1 << 15) | (1 << 16))
+#define VP_DCFG_CRT_SYNC_SKW_DEFAULT (1 << 16)
+#define VP_DCFG_CRT_VSYNC_POL (1 << 9)
+#define VP_DCFG_CRT_HSYNC_POL (1 << 8)
+#define VP_DCFG_DAC_BL_EN (1 << 3)
+#define VP_DCFG_VSYNC_EN (1 << 2)
+#define VP_DCFG_HSYNC_EN (1 << 1)
+#define VP_DCFG_CRT_EN (1 << 0)
-#define DC_DV_CTL 0x88
-#define DC_DV_LINE_SIZE_MASK 0x00000C00
-#define DC_DV_LINE_SIZE_1024 0x00000000
-#define DC_DV_LINE_SIZE_2048 0x00000400
-#define DC_DV_LINE_SIZE_4096 0x00000800
-#define DC_DV_LINE_SIZE_8192 0x00000C00
+#define VP_MISC_APWRDN (1 << 11)
+#define VP_MISC_DACPWRDN (1 << 10)
+#define VP_MISC_BYP_BOTH (1 << 0)
-#define DC_GFX_SCALE 0x90
-#define DC_IRQ_FILT_CTL 0x94
+/*
+ * Flat Panel registers (table 6-71).
+ * Also 64 bit registers; see above note about 32-bit handling.
+ */
+/* we're actually in the VP register space, starting at address 0x400 */
+#define VP_FP_START 0x400
-#define DC_IRQ 0xC8
-#define DC_IRQ_MASK (1 << 0)
-#define DC_VSYNC_IRQ_MASK (1 << 1)
-#define DC_IRQ_STATUS (1 << 20)
-#define DC_VSYNC_IRQ_STATUS (1 << 21)
-
-#define DC_GENLCK_CTRL 0xD4
-#define DC_GENLCK_ENABLE (1 << 18)
-#define DC_GC_ALPHA_FLICK_ENABLE (1 << 25)
-#define DC_GC_FLICKER_FILTER_ENABLE (1 << 24)
-#define DC_GC_FLICKER_FILTER_MASK (0x0F << 28)
-
-#define DC_COLOR_KEY 0xB8
-#define DC_CLR_KEY_ENABLE (1 << 24)
-
-
-#define DC3_DV_LINE_SIZE_MASK 0x00000C00
-#define DC3_DV_LINE_SIZE_1024 0x00000000
-#define DC3_DV_LINE_SIZE_2048 0x00000400
-#define DC3_DV_LINE_SIZE_4096 0x00000800
-#define DC3_DV_LINE_SIZE_8192 0x00000C00
-
-#define DF_VIDEO_CFG 0x0
-#define DF_VCFG_VID_EN (1 << 0)
-
-#define DF_DISPLAY_CFG 0x08
-
-#define DF_DCFG_CRT_EN (1 << 0)
-#define DF_DCFG_HSYNC_EN (1 << 1)
-#define DF_DCFG_VSYNC_EN (1 << 2)
-#define DF_DCFG_DAC_BL_EN (1 << 3)
-#define DF_DCFG_CRT_HSYNC_POL (1 << 8)
-#define DF_DCFG_CRT_VSYNC_POL (1 << 9)
-#define DF_DCFG_GV_PAL_BYP (1 << 21)
+enum fp_registers {
+ FP_PT1 = 0,
+ FP_PT2,
-#define DF_DCFG_CRT_SYNC_SKW_INIT 0x10000
-#define DF_DCFG_CRT_SYNC_SKW_MASK 0x1c000
+ FP_PM,
+ FP_DFC,
-#define DF_DCFG_PWR_SEQ_DLY_INIT 0x80000
-#define DF_DCFG_PWR_SEQ_DLY_MASK 0xe0000
+ FP_RSVD_0,
+ FP_RSVD_1,
-#define DF_MISC 0x50
+ FP_RSVD_2,
+ FP_RSVD_3,
+
+ FP_RSVD_4,
+ FP_DCA,
+
+ FP_DMD,
+ FP_CRC, /* 0x458 */
+};
+
+#define FP_PT2_SCRC (1 << 27) /* shfclk free */
+
+#define FP_PM_P (1 << 24) /* panel power ctl */
+#define FP_PM_PANEL_PWR_UP (1 << 3) /* r/o */
+#define FP_PM_PANEL_PWR_DOWN (1 << 2) /* r/o */
+#define FP_PM_PANEL_OFF (1 << 1) /* r/o */
+#define FP_PM_PANEL_ON (1 << 0) /* r/o */
+
+#define FP_DFC_BC ((1 << 4) | (1 << 5) | (1 << 6))
+
+
+/* register access functions */
+
+static inline uint32_t read_gp(struct lxfb_par *par, int reg)
+{
+ return readl(par->gp_regs + 4*reg);
+}
+
+static inline void write_gp(struct lxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->gp_regs + 4*reg);
+}
+
+static inline uint32_t read_dc(struct lxfb_par *par, int reg)
+{
+ return readl(par->dc_regs + 4*reg);
+}
+
+static inline void write_dc(struct lxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->dc_regs + 4*reg);
+}
+
+static inline uint32_t read_vp(struct lxfb_par *par, int reg)
+{
+ return readl(par->vp_regs + 8*reg);
+}
+
+static inline void write_vp(struct lxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->vp_regs + 8*reg);
+}
+
+static inline uint32_t read_fp(struct lxfb_par *par, int reg)
+{
+ return readl(par->vp_regs + 8*reg + VP_FP_START);
+}
+
+static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val)
+{
+ writel(val, par->vp_regs + 8*reg + VP_FP_START);
+}
-#define DF_MISC_GAM_BYPASS (1 << 0)
-#define DF_MISC_DAC_PWRDN (1 << 10)
-#define DF_MISC_A_PWRDN (1 << 11)
-#define DF_PAR 0x38
-#define DF_PDR 0x40
-#define DF_ALPHA_CONTROL_1 0xD8
-#define DF_VIDEO_REQUEST 0x120
+/* MSRs are defined in asm/geode.h; their bitfields are here */
-#define DF_PANEL_TIM1 0x400
-#define DF_DEFAULT_TFT_PMTIM1 0x0
+#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */
+#define MSR_GLCP_DOTPLL_HALFPIX (1 << 24)
+#define MSR_GLCP_DOTPLL_BYPASS (1 << 15)
+#define MSR_GLCP_DOTPLL_DOTRESET (1 << 0)
-#define DF_PANEL_TIM2 0x408
-#define DF_DEFAULT_TFT_PMTIM2 0x08000000
+/* note: this is actually the VP's GLD_MSR_CONFIG */
+#define MSR_LX_GLD_MSR_CONFIG_FMT ((1 << 3) | (1 << 4) | (1 << 5))
+#define MSR_LX_GLD_MSR_CONFIG_FMT_FP (1 << 3)
+#define MSR_LX_GLD_MSR_CONFIG_FMT_CRT (0)
+#define MSR_LX_GLD_MSR_CONFIG_FPC (1 << 15) /* FP *and* CRT */
-#define DF_FP_PM 0x410
-#define DF_FP_PM_P (1 << 24)
+#define MSR_LX_MSR_PADSEL_TFT_SEL_LOW 0xDFFFFFFF /* ??? */
+#define MSR_LX_MSR_PADSEL_TFT_SEL_HIGH 0x0000003F /* ??? */
-#define DF_DITHER_CONTROL 0x418
-#define DF_DEFAULT_TFT_DITHCTL 0x00000070
-#define GP_BLT_STATUS 0x44
-#define GP_BS_BLT_BUSY (1 << 0)
-#define GP_BS_CB_EMPTY (1 << 4)
+#define MSR_LX_SPARE_MSR_DIS_CFIFO_HGO (1 << 11) /* undocumented */
+#define MSR_LX_SPARE_MSR_VFIFO_ARB_SEL (1 << 10) /* undocumented */
+#define MSR_LX_SPARE_MSR_WM_LPEN_OVRD (1 << 9) /* undocumented */
+#define MSR_LX_SPARE_MSR_LOAD_WM_LPEN_M (1 << 8) /* undocumented */
+#define MSR_LX_SPARE_MSR_DIS_INIT_V_PRI (1 << 7) /* undocumented */
+#define MSR_LX_SPARE_MSR_DIS_VIFO_WM (1 << 6)
+#define MSR_LX_SPARE_MSR_DIS_CWD_CHECK (1 << 5) /* undocumented */
+#define MSR_LX_SPARE_MSR_PIX8_PAN_FIX (1 << 4) /* undocumented */
+#define MSR_LX_SPARE_MSR_FIRST_REQ_MASK (1 << 1) /* undocumented */
#endif
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index eb6b88171538..2cd9b74d2225 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -17,6 +17,7 @@
#include <linux/console.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
@@ -27,14 +28,15 @@
static char *mode_option;
static int noclear, nopanel, nocrt;
-static int fbsize;
+static int vram;
+static int vt_switch;
/* Most of these modes are sorted in ascending order, but
* since the first entry in this table is the "default" mode,
* we try to make it something sane - 640x480-60 is sane
*/
-static const struct fb_videomode geode_modedb[] __initdata = {
+static struct fb_videomode geode_modedb[] __initdata = {
/* 640x480-60 */
{ NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
@@ -215,6 +217,35 @@ static const struct fb_videomode geode_modedb[] __initdata = {
0, FB_VMODE_NONINTERLACED, 0 },
};
+#ifdef CONFIG_OLPC
+#include <asm/olpc.h>
+
+static struct fb_videomode olpc_dcon_modedb[] __initdata = {
+ /* The only mode the DCON has is 1200x900 */
+ { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED, 0 }
+};
+
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+ if (olpc_has_dcon()) {
+ *modedb = (struct fb_videomode *) olpc_dcon_modedb;
+ *size = ARRAY_SIZE(olpc_dcon_modedb);
+ } else {
+ *modedb = (struct fb_videomode *) geode_modedb;
+ *size = ARRAY_SIZE(geode_modedb);
+ }
+}
+
+#else
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+ *modedb = (struct fb_videomode *) geode_modedb;
+ *size = ARRAY_SIZE(geode_modedb);
+}
+#endif
+
static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
if (var->xres > 1920 || var->yres > 1440)
@@ -333,13 +364,13 @@ static int __init lxfb_map_video_memory(struct fb_info *info,
if (ret)
return ret;
- ret = pci_request_region(dev, 3, "lxfb-vip");
+ ret = pci_request_region(dev, 3, "lxfb-vp");
if (ret)
return ret;
info->fix.smem_start = pci_resource_start(dev, 0);
- info->fix.smem_len = fbsize ? fbsize : lx_framebuffer_size();
+ info->fix.smem_len = vram ? vram : lx_framebuffer_size();
info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
@@ -360,18 +391,15 @@ static int __init lxfb_map_video_memory(struct fb_info *info,
if (par->dc_regs == NULL)
return ret;
- par->df_regs = ioremap(pci_resource_start(dev, 3),
+ par->vp_regs = ioremap(pci_resource_start(dev, 3),
pci_resource_len(dev, 3));
- if (par->df_regs == NULL)
+ if (par->vp_regs == NULL)
return ret;
- writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
-
- writel(info->fix.smem_start & 0xFF000000,
- par->dc_regs + DC_PHY_MEM_OFFSET);
-
- writel(0, par->dc_regs + DC_UNLOCK);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+ write_dc(par, DC_GLIU0_MEM_OFFSET, info->fix.smem_start & 0xFF000000);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
dev_info(&dev->dev, "%d KB of video memory at 0x%lx\n",
info->fix.smem_len / 1024, info->fix.smem_start);
@@ -431,6 +459,45 @@ static struct fb_info * __init lxfb_init_fbinfo(struct device *dev)
return info;
}
+#ifdef CONFIG_PM
+static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
+
+ if (state.event == PM_EVENT_SUSPEND) {
+ acquire_console_sem();
+ lx_powerdown(info);
+ fb_set_suspend(info, 1);
+ release_console_sem();
+ }
+
+ /* there's no point in setting PCI states; we emulate PCI, so
+ * we don't end up getting power savings anyway */
+
+ return 0;
+}
+
+static int lxfb_resume(struct pci_dev *pdev)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
+ int ret;
+
+ acquire_console_sem();
+ ret = lx_powerup(info);
+ if (ret) {
+ printk(KERN_ERR "lxfb: power up failed!\n");
+ return ret;
+ }
+
+ fb_set_suspend(info, 0);
+ release_console_sem();
+ return 0;
+}
+#else
+#define lxfb_suspend NULL
+#define lxfb_resume NULL
+#endif
+
static int __init lxfb_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -439,7 +506,7 @@ static int __init lxfb_probe(struct pci_dev *pdev,
int ret;
struct fb_videomode *modedb_ptr;
- int modedb_size;
+ unsigned int modedb_size;
info = lxfb_init_fbinfo(&pdev->dev);
@@ -464,9 +531,7 @@ static int __init lxfb_probe(struct pci_dev *pdev,
/* Set up the mode database */
- modedb_ptr = (struct fb_videomode *) geode_modedb;
- modedb_size = ARRAY_SIZE(geode_modedb);
-
+ get_modedb(&modedb_ptr, &modedb_size);
ret = fb_find_mode(&info->var, info, mode_option,
modedb_ptr, modedb_size, NULL, 16);
@@ -487,6 +552,8 @@ static int __init lxfb_probe(struct pci_dev *pdev,
lxfb_check_var(&info->var, info);
lxfb_set_par(info);
+ pm_set_vt_switch(vt_switch);
+
if (register_framebuffer(info) < 0) {
ret = -EINVAL;
goto err;
@@ -510,8 +577,8 @@ err:
iounmap(par->dc_regs);
pci_release_region(pdev, 2);
}
- if (par->df_regs) {
- iounmap(par->df_regs);
+ if (par->vp_regs) {
+ iounmap(par->vp_regs);
pci_release_region(pdev, 3);
}
@@ -537,7 +604,7 @@ static void lxfb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
pci_release_region(pdev, 2);
- iounmap(par->df_regs);
+ iounmap(par->vp_regs);
pci_release_region(pdev, 3);
pci_set_drvdata(pdev, NULL);
@@ -556,6 +623,8 @@ static struct pci_driver lxfb_driver = {
.id_table = lxfb_id_table,
.probe = lxfb_probe,
.remove = lxfb_remove,
+ .suspend = lxfb_suspend,
+ .resume = lxfb_resume,
};
#ifndef MODULE
@@ -570,9 +639,7 @@ static int __init lxfb_setup(char *options)
if (!*opt)
continue;
- if (!strncmp(opt, "fbsize:", 7))
- fbsize = simple_strtoul(opt+7, NULL, 0);
- else if (!strcmp(opt, "noclear"))
+ if (!strcmp(opt, "noclear"))
noclear = 1;
else if (!strcmp(opt, "nopanel"))
nopanel = 1;
@@ -609,8 +676,11 @@ module_exit(lxfb_cleanup);
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "video mode (<x>x<y>[-<bpp>][@<refr>])");
-module_param(fbsize, int, 0);
-MODULE_PARM_DESC(fbsize, "video memory size");
+module_param(vram, int, 0);
+MODULE_PARM_DESC(vram, "video memory size");
+
+module_param(vt_switch, int, 0);
+MODULE_PARM_DESC(vt_switch, "enable VT switch during suspend/resume");
MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode LX");
MODULE_LICENSE("GPL");
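
[editor note] With fbsize renamed to vram, both Geode drivers now accept the
same parameters; a module load might look like
"modprobe lxfb vram=8388608 vt_switch=1" (the values here are purely
illustrative).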
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index 4fbc99be96ef..cd9d4cc26954 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -13,6 +13,7 @@
#include <linux/fb.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <asm/geode.h>
#include "lxfb.h"
@@ -34,35 +35,85 @@ static const struct {
unsigned int pllval;
unsigned int freq;
} pll_table[] = {
- { 0x000031AC, 24923 },
- { 0x0000215D, 25175 },
- { 0x00001087, 27000 },
- { 0x0000216C, 28322 },
- { 0x0000218D, 28560 },
- { 0x000010C9, 31200 },
- { 0x00003147, 31500 },
- { 0x000010A7, 33032 },
- { 0x00002159, 35112 },
- { 0x00004249, 35500 },
- { 0x00000057, 36000 },
- { 0x0000219A, 37889 },
- { 0x00002158, 39168 },
- { 0x00000045, 40000 },
- { 0x00000089, 43163 },
- { 0x000010E7, 44900 },
- { 0x00002136, 45720 },
- { 0x00003207, 49500 },
- { 0x00002187, 50000 },
- { 0x00004286, 56250 },
- { 0x000010E5, 60065 },
- { 0x00004214, 65000 },
- { 0x00001105, 68179 },
- { 0x000031E4, 74250 },
- { 0x00003183, 75000 },
- { 0x00004284, 78750 },
- { 0x00001104, 81600 },
- { 0x00006363, 94500 },
- { 0x00005303, 97520 },
+ { 0x000131AC, 6231 },
+ { 0x0001215D, 6294 },
+ { 0x00011087, 6750 },
+ { 0x0001216C, 7081 },
+ { 0x0001218D, 7140 },
+ { 0x000110C9, 7800 },
+ { 0x00013147, 7875 },
+ { 0x000110A7, 8258 },
+ { 0x00012159, 8778 },
+ { 0x00014249, 8875 },
+ { 0x00010057, 9000 },
+ { 0x0001219A, 9472 },
+ { 0x00012158, 9792 },
+ { 0x00010045, 10000 },
+ { 0x00010089, 10791 },
+ { 0x000110E7, 11225 },
+ { 0x00012136, 11430 },
+ { 0x00013207, 12375 },
+ { 0x00012187, 12500 },
+ { 0x00014286, 14063 },
+ { 0x000110E5, 15016 },
+ { 0x00014214, 16250 },
+ { 0x00011105, 17045 },
+ { 0x000131E4, 18563 },
+ { 0x00013183, 18750 },
+ { 0x00014284, 19688 },
+ { 0x00011104, 20400 },
+ { 0x00016363, 23625 },
+ { 0x00015303, 24380 },
+ { 0x000031AC, 24923 },
+ { 0x0000215D, 25175 },
+ { 0x00001087, 27000 },
+ { 0x0000216C, 28322 },
+ { 0x0000218D, 28560 },
+ { 0x00010041, 29913 },
+ { 0x000010C9, 31200 },
+ { 0x00003147, 31500 },
+ { 0x000141A1, 32400 },
+ { 0x000010A7, 33032 },
+ { 0x00012182, 33375 },
+ { 0x000141B1, 33750 },
+ { 0x00002159, 35112 },
+ { 0x00004249, 35500 },
+ { 0x00000057, 36000 },
+ { 0x000141E1, 37125 },
+ { 0x0000219A, 37889 },
+ { 0x00002158, 39168 },
+ { 0x00000045, 40000 },
+ { 0x000131A1, 40500 },
+ { 0x00010061, 42301 },
+ { 0x00000089, 43163 },
+ { 0x00012151, 43875 },
+ { 0x000010E7, 44900 },
+ { 0x00002136, 45720 },
+ { 0x000152E1, 47250 },
+ { 0x00010071, 48000 },
+ { 0x00003207, 49500 },
+ { 0x00002187, 50000 },
+ { 0x00014291, 50625 },
+ { 0x00011101, 51188 },
+ { 0x00017481, 54563 },
+ { 0x00004286, 56250 },
+ { 0x00014170, 57375 },
+ { 0x00016210, 58500 },
+ { 0x000010E5, 60065 },
+ { 0x00013140, 62796 },
+ { 0x00004214, 65000 },
+ { 0x00016250, 65250 },
+ { 0x00001105, 68179 },
+ { 0x000141C0, 69600 },
+ { 0x00015220, 70160 },
+ { 0x00010050, 72000 },
+ { 0x000031E4, 74250 },
+ { 0x00003183, 75000 },
+ { 0x00004284, 78750 },
+ { 0x00012130, 80052 },
+ { 0x00001104, 81600 },
+ { 0x00006363, 94500 },
+ { 0x00005303, 97520 },
{ 0x00002183, 100187 },
{ 0x00002122, 101420 },
{ 0x00001081, 108000 },
@@ -101,16 +152,16 @@ static void lx_set_dotpll(u32 pllval)
u32 dotpll_lo, dotpll_hi;
int i;
- rdmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+ rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
- if ((dotpll_lo & GLCP_DOTPLL_LOCK) && (dotpll_hi == pllval))
+ if ((dotpll_lo & MSR_GLCP_DOTPLL_LOCK) && (dotpll_hi == pllval))
return;
dotpll_hi = pllval;
- dotpll_lo &= ~(GLCP_DOTPLL_BYPASS | GLCP_DOTPLL_HALFPIX);
- dotpll_lo |= GLCP_DOTPLL_RESET;
+ dotpll_lo &= ~(MSR_GLCP_DOTPLL_BYPASS | MSR_GLCP_DOTPLL_HALFPIX);
+ dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
- wrmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+ wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
/* Wait 100us for the PLL to lock */
@@ -119,15 +170,15 @@ static void lx_set_dotpll(u32 pllval)
/* Now, loop for the lock bit */
for (i = 0; i < 1000; i++) {
- rdmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
- if (dotpll_lo & GLCP_DOTPLL_LOCK)
+ rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+ if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
break;
}
/* Clear the reset bit */
- dotpll_lo &= ~GLCP_DOTPLL_RESET;
- wrmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+ dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
+ wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
}
/* Set the clock based on the frequency specified by the current mode */
@@ -137,7 +188,7 @@ static void lx_set_clock(struct fb_info *info)
unsigned int diff, min, best = 0;
unsigned int freq, i;
- freq = (unsigned int) (0x3b9aca00 / info->var.pixclock);
+ freq = (unsigned int) (1000000000 / info->var.pixclock);
min = abs(pll_table[0].freq - freq);
@@ -149,7 +200,7 @@ static void lx_set_clock(struct fb_info *info)
}
}
- lx_set_dotpll(pll_table[best].pllval & 0x7FFF);
+ lx_set_dotpll(pll_table[best].pllval & 0x00017FFF);
}
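
[editor note] The constant swap in lx_set_clock() is cosmetic: 0x3b9aca00 is
1000000000. Since info->var.pixclock is a pixel period in picoseconds,
10^9 / pixclock gives the dot clock in kHz, the same unit as the freq column
of pll_table. For the 640x480-60 modedb entry earlier in this patch
(pixclock = 39682 ps), freq = 10^9 / 39682 is about 25200 kHz, and the
nearest-match loop lands on the 25175 entry (pllval 0x0000215D), i.e. the
standard 25.175 MHz VESA dot clock.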
static void lx_graphics_disable(struct fb_info *info)
@@ -159,63 +210,62 @@ static void lx_graphics_disable(struct fb_info *info)
/* Note: This assumes that the video is in a quiet state */
- writel(0, par->df_regs + DF_ALPHA_CONTROL_1);
- writel(0, par->df_regs + DF_ALPHA_CONTROL_1 + 32);
- writel(0, par->df_regs + DF_ALPHA_CONTROL_1 + 64);
+ write_vp(par, VP_A1T, 0);
+ write_vp(par, VP_A2T, 0);
+ write_vp(par, VP_A3T, 0);
/* Turn off the VGA and video enable */
- val = readl (par->dc_regs + DC_GENERAL_CFG) &
- ~(DC_GCFG_VGAE | DC_GCFG_VIDE);
+ val = read_dc(par, DC_GENERAL_CFG) & ~(DC_GENERAL_CFG_VGAE |
+ DC_GENERAL_CFG_VIDE);
- writel(val, par->dc_regs + DC_GENERAL_CFG);
+ write_dc(par, DC_GENERAL_CFG, val);
- val = readl(par->df_regs + DF_VIDEO_CFG) & ~DF_VCFG_VID_EN;
- writel(val, par->df_regs + DF_VIDEO_CFG);
+ val = read_vp(par, VP_VCFG) & ~VP_VCFG_VID_EN;
+ write_vp(par, VP_VCFG, val);
- writel( DC_IRQ_MASK | DC_VSYNC_IRQ_MASK |
- DC_IRQ_STATUS | DC_VSYNC_IRQ_STATUS,
- par->dc_regs + DC_IRQ);
+ write_dc(par, DC_IRQ, DC_IRQ_MASK | DC_IRQ_VIP_VSYNC_LOSS_IRQ_MASK |
+ DC_IRQ_STATUS | DC_IRQ_VIP_VSYNC_IRQ_STATUS);
- val = readl(par->dc_regs + DC_GENLCK_CTRL) & ~DC_GENLCK_ENABLE;
- writel(val, par->dc_regs + DC_GENLCK_CTRL);
+ val = read_dc(par, DC_GENLK_CTL) & ~DC_GENLK_CTL_GENLK_EN;
+ write_dc(par, DC_GENLK_CTL, val);
- val = readl(par->dc_regs + DC_COLOR_KEY) & ~DC_CLR_KEY_ENABLE;
- writel(val & ~DC_CLR_KEY_ENABLE, par->dc_regs + DC_COLOR_KEY);
+ val = read_dc(par, DC_CLR_KEY);
+ write_dc(par, DC_CLR_KEY, val & ~DC_CLR_KEY_CLR_KEY_EN);
- /* We don't actually blank the panel, due to the long latency
- involved with bringing it back */
+ /* turn off the panel */
+ write_fp(par, FP_PM, read_fp(par, FP_PM) & ~FP_PM_P);
- val = readl(par->df_regs + DF_MISC) | DF_MISC_DAC_PWRDN;
- writel(val, par->df_regs + DF_MISC);
+ val = read_vp(par, VP_MISC) | VP_MISC_DACPWRDN;
+ write_vp(par, VP_MISC, val);
/* Turn off the display */
- val = readl(par->df_regs + DF_DISPLAY_CFG);
- writel(val & ~(DF_DCFG_CRT_EN | DF_DCFG_HSYNC_EN | DF_DCFG_VSYNC_EN |
- DF_DCFG_DAC_BL_EN), par->df_regs + DF_DISPLAY_CFG);
+ val = read_vp(par, VP_DCFG);
+ write_vp(par, VP_DCFG, val & ~(VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
+ VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN));
- gcfg = readl(par->dc_regs + DC_GENERAL_CFG);
- gcfg &= ~(DC_GCFG_CMPE | DC_GCFG_DECE);
- writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+ gcfg = read_dc(par, DC_GENERAL_CFG);
+ gcfg &= ~(DC_GENERAL_CFG_CMPE | DC_GENERAL_CFG_DECE);
+ write_dc(par, DC_GENERAL_CFG, gcfg);
/* Turn off the TGEN */
- val = readl(par->dc_regs + DC_DISPLAY_CFG);
- val &= ~DC_DCFG_TGEN;
- writel(val, par->dc_regs + DC_DISPLAY_CFG);
+ val = read_dc(par, DC_DISPLAY_CFG);
+ val &= ~DC_DISPLAY_CFG_TGEN;
+ write_dc(par, DC_DISPLAY_CFG, val);
/* Wait 1000 usecs to ensure that the TGEN is clear */
udelay(1000);
/* Turn off the FIFO loader */
- gcfg &= ~DC_GCFG_DFLE;
- writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+ gcfg &= ~DC_GENERAL_CFG_DFLE;
+ write_dc(par, DC_GENERAL_CFG, gcfg);
/* Lastly, wait for the GP to go idle */
do {
- val = readl(par->gp_regs + GP_BLT_STATUS);
- } while ((val & GP_BS_BLT_BUSY) || !(val & GP_BS_CB_EMPTY));
+ val = read_gp(par, GP_BLT_STATUS);
+ } while ((val & GP_BLT_STATUS_PB) || !(val & GP_BLT_STATUS_CE));
}
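
[editor note] The spin at the end of lx_graphics_disable() waits for the
graphics processor to drain: idle means no primitive in flight (PB clear)
and the command buffer empty (CE set). The same condition guards
lx_save_regs() below. As a sketch, the predicate could be factored out like
this (the example_ name is ours, not part of the patch):

	static inline int example_gp_idle(struct lxfb_par *par)
	{
		u32 val = read_gp(par, GP_BLT_STATUS);

		/* idle: no primitive busy, command buffer has emptied */
		return !(val & GP_BLT_STATUS_PB) && (val & GP_BLT_STATUS_CE);
	}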
static void lx_graphics_enable(struct fb_info *info)
@@ -224,80 +274,85 @@ static void lx_graphics_enable(struct fb_info *info)
u32 temp, config;
/* Set the video request register */
- writel(0, par->df_regs + DF_VIDEO_REQUEST);
+ write_vp(par, VP_VRR, 0);
/* Set up the polarities */
- config = readl(par->df_regs + DF_DISPLAY_CFG);
+ config = read_vp(par, VP_DCFG);
- config &= ~(DF_DCFG_CRT_SYNC_SKW_MASK | DF_DCFG_PWR_SEQ_DLY_MASK |
- DF_DCFG_CRT_HSYNC_POL | DF_DCFG_CRT_VSYNC_POL);
+ config &= ~(VP_DCFG_CRT_SYNC_SKW | VP_DCFG_PWR_SEQ_DELAY |
+ VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL);
- config |= (DF_DCFG_CRT_SYNC_SKW_INIT | DF_DCFG_PWR_SEQ_DLY_INIT |
- DF_DCFG_GV_PAL_BYP);
+ config |= (VP_DCFG_CRT_SYNC_SKW_DEFAULT | VP_DCFG_PWR_SEQ_DELAY_DEFAULT
+ | VP_DCFG_GV_GAM);
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
- config |= DF_DCFG_CRT_HSYNC_POL;
+ config |= VP_DCFG_CRT_HSYNC_POL;
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
- config |= DF_DCFG_CRT_VSYNC_POL;
+ config |= VP_DCFG_CRT_VSYNC_POL;
if (par->output & OUTPUT_PANEL) {
u32 msrlo, msrhi;
- writel(DF_DEFAULT_TFT_PMTIM1,
- par->df_regs + DF_PANEL_TIM1);
- writel(DF_DEFAULT_TFT_PMTIM2,
- par->df_regs + DF_PANEL_TIM2);
- writel(DF_DEFAULT_TFT_DITHCTL,
- par->df_regs + DF_DITHER_CONTROL);
+ write_fp(par, FP_PT1, 0);
+ write_fp(par, FP_PT2, FP_PT2_SCRC);
+ write_fp(par, FP_DFC, FP_DFC_BC);
- msrlo = DF_DEFAULT_TFT_PAD_SEL_LOW;
- msrhi = DF_DEFAULT_TFT_PAD_SEL_HIGH;
+ msrlo = MSR_LX_MSR_PADSEL_TFT_SEL_LOW;
+ msrhi = MSR_LX_MSR_PADSEL_TFT_SEL_HIGH;
- wrmsr(MSR_LX_DF_PADSEL, msrlo, msrhi);
+ wrmsr(MSR_LX_MSR_PADSEL, msrlo, msrhi);
}
if (par->output & OUTPUT_CRT) {
- config |= DF_DCFG_CRT_EN | DF_DCFG_HSYNC_EN |
- DF_DCFG_VSYNC_EN | DF_DCFG_DAC_BL_EN;
+ config |= VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
+ VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN;
}
- writel(config, par->df_regs + DF_DISPLAY_CFG);
+ write_vp(par, VP_DCFG, config);
/* Turn the CRT dacs back on */
if (par->output & OUTPUT_CRT) {
- temp = readl(par->df_regs + DF_MISC);
- temp &= ~(DF_MISC_DAC_PWRDN | DF_MISC_A_PWRDN);
- writel(temp, par->df_regs + DF_MISC);
+ temp = read_vp(par, VP_MISC);
+ temp &= ~(VP_MISC_DACPWRDN | VP_MISC_APWRDN);
+ write_vp(par, VP_MISC, temp);
}
/* Turn the panel on (if it isn't already) */
-
- if (par->output & OUTPUT_PANEL) {
- temp = readl(par->df_regs + DF_FP_PM);
-
- if (!(temp & 0x09))
- writel(temp | DF_FP_PM_P, par->df_regs + DF_FP_PM);
- }
-
- temp = readl(par->df_regs + DF_MISC);
- temp = readl(par->df_regs + DF_DISPLAY_CFG);
+ if (par->output & OUTPUT_PANEL)
+ write_fp(par, FP_PM, read_fp(par, FP_PM) | FP_PM_P);
}
unsigned int lx_framebuffer_size(void)
{
unsigned int val;
+ if (!geode_has_vsa2()) {
+ uint32_t hi, lo;
+
+ /* The number of pages is (PMAX - PMIN)+1 */
+ rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
+
+ /* PMAX */
+ val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
+ /* PMIN */
+ val -= (lo & 0x000fffff);
+ val += 1;
+
+ /* The page size is 4k */
+ return (val << 12);
+ }
+
/* The frame buffer size is reported by a VSM in VSA II */
/* Virtual Register Class = 0x02 */
/* VG_MEM_SIZE (1MB units) = 0x00 */
- outw(0xFC53, 0xAC1C);
- outw(0x0200, 0xAC1C);
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
- val = (unsigned int)(inw(0xAC1E)) & 0xFE;
+ val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFE;
return (val << 20);
}
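
[editor note] When VSA II is absent, the frame buffer size comes from the
GLIU P2D range offset descriptor: PMAX is assembled from the low 8 bits of
the MSR's upper word and the top 12 bits of the lower word, PMIN is the low
20 bits, and (PMAX - PMIN + 1) is a page count in 4 KiB units. As a purely
hypothetical example, PMAX = 0x7e7ff and PMIN = 0x7e000 would give 0x800
pages, i.e. 0x800 << 12 = 8 MiB of video memory.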
@@ -313,7 +368,7 @@ void lx_set_mode(struct fb_info *info)
int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
/* Unlock the DC registers */
- writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
lx_graphics_disable(info);
@@ -321,102 +376,104 @@ void lx_set_mode(struct fb_info *info)
/* Set output mode */
- rdmsrl(MSR_LX_DF_GLCONFIG, msrval);
- msrval &= ~DF_CONFIG_OUTPUT_MASK;
+ rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
+ msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT;
if (par->output & OUTPUT_PANEL) {
- msrval |= DF_OUTPUT_PANEL;
+ msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_FP;
if (par->output & OUTPUT_CRT)
- msrval |= DF_SIMULTANEOUS_CRT_AND_FP;
+ msrval |= MSR_LX_GLD_MSR_CONFIG_FPC;
else
- msrval &= ~DF_SIMULTANEOUS_CRT_AND_FP;
- } else {
- msrval |= DF_OUTPUT_CRT;
- }
+ msrval &= ~MSR_LX_GLD_MSR_CONFIG_FPC;
+ } else
+ msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT;
- wrmsrl(MSR_LX_DF_GLCONFIG, msrval);
+ wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
/* Clear the various buffers */
/* FIXME: Adjust for panning here */
- writel(0, par->dc_regs + DC_FB_START);
- writel(0, par->dc_regs + DC_CB_START);
- writel(0, par->dc_regs + DC_CURSOR_START);
+ write_dc(par, DC_FB_ST_OFFSET, 0);
+ write_dc(par, DC_CB_ST_OFFSET, 0);
+ write_dc(par, DC_CURS_ST_OFFSET, 0);
/* FIXME: Add support for interlacing */
/* FIXME: Add support for scaling */
- val = readl(par->dc_regs + DC_GENLCK_CTRL);
- val &= ~(DC_GC_ALPHA_FLICK_ENABLE |
- DC_GC_FLICKER_FILTER_ENABLE | DC_GC_FLICKER_FILTER_MASK);
+ val = read_dc(par, DC_GENLK_CTL);
+ val &= ~(DC_GENLK_CTL_ALPHA_FLICK_EN | DC_GENLK_CTL_FLICK_EN |
+ DC_GENLK_CTL_FLICK_SEL_MASK);
/* Default scaling params */
- writel((0x4000 << 16) | 0x4000, par->dc_regs + DC_GFX_SCALE);
- writel(0, par->dc_regs + DC_IRQ_FILT_CTL);
- writel(val, par->dc_regs + DC_GENLCK_CTRL);
+ write_dc(par, DC_GFX_SCALE, (0x4000 << 16) | 0x4000);
+ write_dc(par, DC_IRQ_FILT_CTL, 0);
+ write_dc(par, DC_GENLK_CTL, val);
/* FIXME: Support compression */
if (info->fix.line_length > 4096)
- dv = DC_DV_LINE_SIZE_8192;
+ dv = DC_DV_CTL_DV_LINE_SIZE_8K;
else if (info->fix.line_length > 2048)
- dv = DC_DV_LINE_SIZE_4096;
+ dv = DC_DV_CTL_DV_LINE_SIZE_4K;
else if (info->fix.line_length > 1024)
- dv = DC_DV_LINE_SIZE_2048;
+ dv = DC_DV_CTL_DV_LINE_SIZE_2K;
else
- dv = DC_DV_LINE_SIZE_1024;
+ dv = DC_DV_CTL_DV_LINE_SIZE_1K;
max = info->fix.line_length * info->var.yres;
max = (max + 0x3FF) & 0xFFFFFC00;
- writel(max | DC_DV_TOP_ENABLE, par->dc_regs + DC_DV_TOP);
+ write_dc(par, DC_DV_TOP, max | DC_DV_TOP_DV_TOP_EN);
- val = readl(par->dc_regs + DC_DV_CTL) & ~DC_DV_LINE_SIZE_MASK;
- writel(val | dv, par->dc_regs + DC_DV_CTL);
+ val = read_dc(par, DC_DV_CTL) & ~DC_DV_CTL_DV_LINE_SIZE;
+ write_dc(par, DC_DV_CTL, val | dv);
size = info->var.xres * (info->var.bits_per_pixel >> 3);
- writel(info->fix.line_length >> 3, par->dc_regs + DC_GRAPHICS_PITCH);
- writel((size + 7) >> 3, par->dc_regs + DC_LINE_SIZE);
+ write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
+ write_dc(par, DC_LINE_SIZE, (size + 7) >> 3);
/* Set default watermark values */
- rdmsrl(MSR_LX_DC_SPARE, msrval);
-
- msrval &= ~(DC_SPARE_DISABLE_CFIFO_HGO | DC_SPARE_VFIFO_ARB_SELECT |
- DC_SPARE_LOAD_WM_LPEN_MASK | DC_SPARE_WM_LPEN_OVRD |
- DC_SPARE_DISABLE_INIT_VID_PRI | DC_SPARE_DISABLE_VFIFO_WM);
- msrval |= DC_SPARE_DISABLE_VFIFO_WM | DC_SPARE_DISABLE_INIT_VID_PRI;
- wrmsrl(MSR_LX_DC_SPARE, msrval);
-
- gcfg = DC_GCFG_DFLE; /* Display fifo enable */
- gcfg |= 0xB600; /* Set default priority */
- gcfg |= DC_GCFG_FDTY; /* Set the frame dirty mode */
-
- dcfg = DC_DCFG_VDEN; /* Enable video data */
- dcfg |= DC_DCFG_GDEN; /* Enable graphics */
- dcfg |= DC_DCFG_TGEN; /* Turn on the timing generator */
- dcfg |= DC_DCFG_TRUP; /* Update timings immediately */
- dcfg |= DC_DCFG_PALB; /* Palette bypass in > 8 bpp modes */
- dcfg |= DC_DCFG_VISL;
- dcfg |= DC_DCFG_DCEN; /* Always center the display */
+ rdmsrl(MSR_LX_SPARE_MSR, msrval);
+
+ msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO
+ | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL
+ | MSR_LX_SPARE_MSR_LOAD_WM_LPEN_M
+ | MSR_LX_SPARE_MSR_WM_LPEN_OVRD);
+ msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM |
+ MSR_LX_SPARE_MSR_DIS_INIT_V_PRI;
+ wrmsrl(MSR_LX_SPARE_MSR, msrval);
+
+ gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */
+ gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */
+ (0xb << DC_GENERAL_CFG_DFHPEL_SHIFT);
+ gcfg |= DC_GENERAL_CFG_FDTY; /* Set the frame dirty mode */
+
+ dcfg = DC_DISPLAY_CFG_VDEN; /* Enable video data */
+ dcfg |= DC_DISPLAY_CFG_GDEN; /* Enable graphics */
+ dcfg |= DC_DISPLAY_CFG_TGEN; /* Turn on the timing generator */
+ dcfg |= DC_DISPLAY_CFG_TRUP; /* Update timings immediately */
+ dcfg |= DC_DISPLAY_CFG_PALB; /* Palette bypass in > 8 bpp modes */
+ dcfg |= DC_DISPLAY_CFG_VISL;
+ dcfg |= DC_DISPLAY_CFG_DCEN; /* Always center the display */
/* Set the current BPP mode */
switch (info->var.bits_per_pixel) {
case 8:
- dcfg |= DC_DCFG_DISP_MODE_8BPP;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
break;
case 16:
- dcfg |= DC_DCFG_DISP_MODE_16BPP | DC_DCFG_16BPP;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
break;
case 32:
case 24:
- dcfg |= DC_DCFG_DISP_MODE_24BPP;
+ dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
break;
}
@@ -436,35 +493,31 @@ void lx_set_mode(struct fb_info *info)
vblankend = vsyncend + info->var.upper_margin;
vtotal = vblankend;
- writel((hactive - 1) | ((htotal - 1) << 16),
- par->dc_regs + DC_H_ACTIVE_TIMING);
- writel((hblankstart - 1) | ((hblankend - 1) << 16),
- par->dc_regs + DC_H_BLANK_TIMING);
- writel((hsyncstart - 1) | ((hsyncend - 1) << 16),
- par->dc_regs + DC_H_SYNC_TIMING);
-
- writel((vactive - 1) | ((vtotal - 1) << 16),
- par->dc_regs + DC_V_ACTIVE_TIMING);
+ write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1) | ((htotal - 1) << 16));
+ write_dc(par, DC_H_BLANK_TIMING,
+ (hblankstart - 1) | ((hblankend - 1) << 16));
+ write_dc(par, DC_H_SYNC_TIMING,
+ (hsyncstart - 1) | ((hsyncend - 1) << 16));
- writel((vblankstart - 1) | ((vblankend - 1) << 16),
- par->dc_regs + DC_V_BLANK_TIMING);
+ write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1) | ((vtotal - 1) << 16));
+ write_dc(par, DC_V_BLANK_TIMING,
+ (vblankstart - 1) | ((vblankend - 1) << 16));
+ write_dc(par, DC_V_SYNC_TIMING,
+ (vsyncstart - 1) | ((vsyncend - 1) << 16));
- writel((vsyncstart - 1) | ((vsyncend - 1) << 16),
- par->dc_regs + DC_V_SYNC_TIMING);
-
- writel( (info->var.xres - 1) << 16 | (info->var.yres - 1),
- par->dc_regs + DC_FB_ACTIVE);
+ write_dc(par, DC_FB_ACTIVE,
+ (info->var.xres - 1) << 16 | (info->var.yres - 1));
/* And re-enable the graphics output */
lx_graphics_enable(info);
/* Write the two main configuration registers */
- writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
- writel(0, par->dc_regs + DC_ARB_CFG);
- writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+ write_dc(par, DC_DISPLAY_CFG, dcfg);
+ write_dc(par, DC_ARB_CFG, 0);
+ write_dc(par, DC_GENERAL_CFG, gcfg);
/* Lock the DC registers */
- writel(0, par->dc_regs + DC_UNLOCK);
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
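/*
 * Editorial aside: this patch converts the raw readl()/writel() pokes
 * above into read_dc()/write_dc() style accessors.  A minimal sketch of
 * what those wrappers presumably look like -- the real ones live in
 * lxfb.h, which is not part of this hunk.  Note the dword indexing,
 * which is also why the par->dc[] shadow array further down can be
 * indexed directly by register name:
 */
static inline uint32_t read_dc(struct lxfb_par *par, int reg)
{
	return readl(par->dc_regs + reg * 4);
}

static inline void write_dc(struct lxfb_par *par, int reg, uint32_t val)
{
	writel(val, par->dc_regs + reg * 4);
}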
void lx_set_palette_reg(struct fb_info *info, unsigned regno,
@@ -479,58 +532,310 @@ void lx_set_palette_reg(struct fb_info *info, unsigned regno,
val |= (green) & 0x00ff00;
val |= (blue >> 8) & 0x0000ff;
- writel(regno, par->dc_regs + DC_PAL_ADDRESS);
- writel(val, par->dc_regs + DC_PAL_DATA);
+ write_dc(par, DC_PAL_ADDRESS, regno);
+ write_dc(par, DC_PAL_DATA, val);
}
int lx_blank_display(struct fb_info *info, int blank_mode)
{
struct lxfb_par *par = info->par;
u32 dcfg, fp_pm;
- int blank, hsync, vsync;
+ int blank, hsync, vsync, crt;
/* CRT power saving modes. */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- blank = 0; hsync = 1; vsync = 1;
+ blank = 0; hsync = 1; vsync = 1; crt = 1;
break;
case FB_BLANK_NORMAL:
- blank = 1; hsync = 1; vsync = 1;
+ blank = 1; hsync = 1; vsync = 1; crt = 1;
break;
case FB_BLANK_VSYNC_SUSPEND:
- blank = 1; hsync = 1; vsync = 0;
+ blank = 1; hsync = 1; vsync = 0; crt = 1;
break;
case FB_BLANK_HSYNC_SUSPEND:
- blank = 1; hsync = 0; vsync = 1;
+ blank = 1; hsync = 0; vsync = 1; crt = 1;
break;
case FB_BLANK_POWERDOWN:
- blank = 1; hsync = 0; vsync = 0;
+ blank = 1; hsync = 0; vsync = 0; crt = 0;
break;
default:
return -EINVAL;
}
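	/*
	 * Editorial summary of the mapping above:
	 *
	 *   blank_mode       blank  hsync  vsync  crt
	 *   UNBLANK            0      1      1     1
	 *   NORMAL             1      1      1     1
	 *   VSYNC_SUSPEND      1      1      0     1
	 *   HSYNC_SUSPEND      1      0      1     1
	 *   POWERDOWN          1      0      0     0
	 */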
- dcfg = readl(par->df_regs + DF_DISPLAY_CFG);
- dcfg &= ~(DF_DCFG_DAC_BL_EN
- | DF_DCFG_HSYNC_EN | DF_DCFG_VSYNC_EN);
+ dcfg = read_vp(par, VP_DCFG);
+ dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
+ VP_DCFG_CRT_EN);
if (!blank)
- dcfg |= DF_DCFG_DAC_BL_EN;
+ dcfg |= VP_DCFG_DAC_BL_EN;
if (hsync)
- dcfg |= DF_DCFG_HSYNC_EN;
+ dcfg |= VP_DCFG_HSYNC_EN;
if (vsync)
- dcfg |= DF_DCFG_VSYNC_EN;
- writel(dcfg, par->df_regs + DF_DISPLAY_CFG);
+ dcfg |= VP_DCFG_VSYNC_EN;
+ if (crt)
+ dcfg |= VP_DCFG_CRT_EN;
+ write_vp(par, VP_DCFG, dcfg);
/* Power on/off flat panel */
if (par->output & OUTPUT_PANEL) {
- fp_pm = readl(par->df_regs + DF_FP_PM);
+ fp_pm = read_fp(par, FP_PM);
if (blank_mode == FB_BLANK_POWERDOWN)
- fp_pm &= ~DF_FP_PM_P;
+ fp_pm &= ~FP_PM_P;
else
- fp_pm |= DF_FP_PM_P;
- writel(fp_pm, par->df_regs + DF_FP_PM);
+ fp_pm |= FP_PM_P;
+ write_fp(par, FP_PM, fp_pm);
}
return 0;
}
+
+#ifdef CONFIG_PM
+
+static void lx_save_regs(struct lxfb_par *par)
+{
+ uint32_t filt;
+ int i;
+
+ /* wait for the BLT engine to stop being busy */
+ do {
+ i = read_gp(par, GP_BLT_STATUS);
+ } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE));
+
+ /* save MSRs */
+ rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
+ rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+ rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+ rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+
+ /* save registers */
+ memcpy(par->gp, par->gp_regs, sizeof(par->gp));
+ memcpy(par->dc, par->dc_regs, sizeof(par->dc));
+ memcpy(par->vp, par->vp_regs, sizeof(par->vp));
+ memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp));
+
+ /* save the palette */
+ write_dc(par, DC_PAL_ADDRESS, 0);
+ for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+ par->pal[i] = read_dc(par, DC_PAL_DATA);
+
+ /* save the horizontal filter coefficients */
+ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
+ for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
+ write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+ par->hcoeff[i] = read_dc(par, DC_FILT_COEFF1);
+ par->hcoeff[i + 1] = read_dc(par, DC_FILT_COEFF2);
+ }
+
+ /* save the vertical filter coefficients */
+ filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
+ for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
+ write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+ par->vcoeff[i] = read_dc(par, DC_FILT_COEFF1);
+ }
+
+ /* save video coeff ram */
+ memcpy(par->vp_coeff, par->vp_regs + VP_VCR, sizeof(par->vp_coeff));
+}
+
+static void lx_restore_gfx_proc(struct lxfb_par *par)
+{
+ int i;
+
+ /* a bunch of registers require GP_RASTER_MODE to be set first */
+ write_gp(par, GP_RASTER_MODE, par->gp[GP_RASTER_MODE]);
+
+ for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
+ switch (i) {
+ case GP_RASTER_MODE:
+ case GP_VECTOR_MODE:
+ case GP_BLT_MODE:
+ case GP_BLT_STATUS:
+ case GP_HST_SRC:
+ /* FIXME: restore LUT data */
+ case GP_LUT_INDEX:
+ case GP_LUT_DATA:
+ /* don't restore these registers */
+ break;
+
+ default:
+ write_gp(par, i, par->gp[i]);
+ }
+ }
+}
+
+static void lx_restore_display_ctlr(struct lxfb_par *par)
+{
+ uint32_t filt;
+ int i;
+
+ wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+
+ for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
+ switch (i) {
+ case DC_UNLOCK:
+ /* unlock the DC; runs first */
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+ break;
+
+ case DC_GENERAL_CFG:
+ case DC_DISPLAY_CFG:
+ /* disable all while restoring */
+ write_dc(par, i, 0);
+ break;
+
+ case DC_DV_CTL:
+ /* set all ram to dirty */
+ write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM);
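+			/* intentional fall through to the "don't restore" break */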
+
+ case DC_RSVD_1:
+ case DC_RSVD_2:
+ case DC_RSVD_3:
+ case DC_LINE_CNT:
+ case DC_PAL_ADDRESS:
+ case DC_PAL_DATA:
+ case DC_DFIFO_DIAG:
+ case DC_CFIFO_DIAG:
+ case DC_FILT_COEFF1:
+ case DC_FILT_COEFF2:
+ case DC_RSVD_4:
+ case DC_RSVD_5:
+ /* don't restore these registers */
+ break;
+
+ default:
+ write_dc(par, i, par->dc[i]);
+ }
+ }
+
+ /* restore the palette */
+ write_dc(par, DC_PAL_ADDRESS, 0);
+ for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+ write_dc(par, DC_PAL_DATA, par->pal[i]);
+
+ /* restore the horizontal filter coefficients */
+ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
+ for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
+ write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+ write_dc(par, DC_FILT_COEFF1, par->hcoeff[i]);
+ write_dc(par, DC_FILT_COEFF2, par->hcoeff[i + 1]);
+ }
+
+ /* restore the vertical filter coefficients */
+ filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
+ for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
+ write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+ write_dc(par, DC_FILT_COEFF1, par->vcoeff[i]);
+ }
+}
+
+static void lx_restore_video_proc(struct lxfb_par *par)
+{
+ int i;
+
+ wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+ wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
+
+ for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
+ switch (i) {
+ case VP_VCFG:
+ case VP_DCFG:
+ case VP_PAR:
+ case VP_PDR:
+ case VP_CCS:
+ case VP_RSVD_0:
+ /* case VP_VDC: */ /* why should this not be restored? */
+ case VP_RSVD_1:
+ case VP_CRC32:
+ /* don't restore these registers */
+ break;
+
+ default:
+ write_vp(par, i, par->vp[i]);
+ }
+ }
+
+ /* restore video coeff ram */
+ memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff));
+}
+
+static void lx_restore_regs(struct lxfb_par *par)
+{
+ int i;
+
+ lx_set_dotpll((u32) (par->msr.dotpll >> 32));
+ lx_restore_gfx_proc(par);
+ lx_restore_display_ctlr(par);
+ lx_restore_video_proc(par);
+
+ /* Flat Panel */
+ for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
+ switch (i) {
+ case FP_PM:
+ case FP_RSVD_0:
+ case FP_RSVD_1:
+ case FP_RSVD_2:
+ case FP_RSVD_3:
+ case FP_RSVD_4:
+ /* don't restore these registers */
+ break;
+
+ default:
+ write_fp(par, i, par->fp[i]);
+ }
+ }
+
+ /* control the panel */
+ if (par->fp[FP_PM] & FP_PM_P) {
+ /* power on the panel if not already power{ed,ing} on */
+ if (!(read_fp(par, FP_PM) &
+ (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
+ write_fp(par, FP_PM, par->fp[FP_PM]);
+ } else {
+ /* power down the panel if not already power{ed,ing} down */
+ if (!(read_fp(par, FP_PM) &
+ (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
+ write_fp(par, FP_PM, par->fp[FP_PM]);
+ }
+
+ /* turn everything on */
+ write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
+ write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
+ write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
+ /* do this last; it will enable the FIFO load */
+ write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
+
+ /* lock the door behind us */
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+int lx_powerdown(struct fb_info *info)
+{
+ struct lxfb_par *par = info->par;
+
+ if (par->powered_down)
+ return 0;
+
+ lx_save_regs(par);
+ lx_graphics_disable(info);
+
+ par->powered_down = 1;
+ return 0;
+}
+
+int lx_powerup(struct fb_info *info)
+{
+ struct lxfb_par *par = info->par;
+
+ if (!par->powered_down)
+ return 0;
+
+ lx_restore_regs(par);
+
+ par->powered_down = 0;
+ return 0;
+}
+
+#endif
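/*
 * Editorial sketch (not part of the patch): how lx_powerdown() and
 * lx_powerup() would typically be driven from the driver's PCI PM
 * callbacks.  The lxfb_suspend()/lxfb_resume() names and the console
 * locking are assumptions for illustration; the real callers are added
 * elsewhere in this series.
 */
static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct fb_info *info = pci_get_drvdata(pdev);

	if (state.event == PM_EVENT_SUSPEND) {
		acquire_console_sem();
		lx_powerdown(info);	/* saves MSRs and DC/VP/FP/GP state */
		fb_set_suspend(info, 1);
		release_console_sem();
	}
	return 0;
}

static int lxfb_resume(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);

	acquire_console_sem();
	lx_powerup(info);	/* restores state and re-enables the pipeline */
	fb_set_suspend(info, 0);
	release_console_sem();
	return 0;
}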
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c
new file mode 100644
index 000000000000..9aff32ef8bb6
--- /dev/null
+++ b/drivers/video/geode/suspend_gx.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/fb.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/geode.h>
+#include <asm/delay.h>
+
+#include "gxfb.h"
+
+#ifdef CONFIG_PM
+
+static void gx_save_regs(struct gxfb_par *par)
+{
+ int i;
+
+ /* wait for the BLT engine to stop being busy */
+ do {
+ i = read_gp(par, GP_BLT_STATUS);
+ } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
+
+ /* save MSRs */
+ rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
+ rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+
+ /* save registers */
+ memcpy(par->gp, par->gp_regs, sizeof(par->gp));
+ memcpy(par->dc, par->dc_regs, sizeof(par->dc));
+ memcpy(par->vp, par->vid_regs, sizeof(par->vp));
+ memcpy(par->fp, par->vid_regs + VP_FP_START, sizeof(par->fp));
+
+ /* save the palette */
+ write_dc(par, DC_PAL_ADDRESS, 0);
+ for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+ par->pal[i] = read_dc(par, DC_PAL_DATA);
+}
+
+static void gx_set_dotpll(uint32_t dotpll_hi)
+{
+ uint32_t dotpll_lo;
+ int i;
+
+ rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+ dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
+ dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
+ wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+
+ /* wait for the PLL to lock */
+ for (i = 0; i < 200; i++) {
+ rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+ if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
+ break;
+ udelay(1);
+ }
+
+ /* PLL set, unlock */
+ dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
+ wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+}
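/*
 * Editorial note: the loop above bounds the lock wait at roughly
 * 200 * udelay(1) = 200us.  If MSR_GLCP_DOTPLL_LOCK never asserts,
 * DOTRESET is released anyway and no error is reported.
 */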
+
+static void gx_restore_gfx_proc(struct gxfb_par *par)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
+ switch (i) {
+ case GP_VECTOR_MODE:
+ case GP_BLT_MODE:
+ case GP_BLT_STATUS:
+ case GP_HST_SRC:
+ /* don't restore these registers */
+ break;
+ default:
+ write_gp(par, i, par->gp[i]);
+ }
+ }
+}
+
+static void gx_restore_display_ctlr(struct gxfb_par *par)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
+ switch (i) {
+ case DC_UNLOCK:
+ /* unlock the DC; runs first */
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+ break;
+
+ case DC_GENERAL_CFG:
+ /* write without the enables */
+ write_dc(par, i, par->dc[i] & ~(DC_GENERAL_CFG_VIDE |
+ DC_GENERAL_CFG_ICNE |
+ DC_GENERAL_CFG_CURE |
+ DC_GENERAL_CFG_DFLE));
+ break;
+
+ case DC_DISPLAY_CFG:
+ /* write without the enables */
+ write_dc(par, i, par->dc[i] & ~(DC_DISPLAY_CFG_VDEN |
+ DC_DISPLAY_CFG_GDEN |
+ DC_DISPLAY_CFG_TGEN));
+ break;
+
+ case DC_RSVD_0:
+ case DC_RSVD_1:
+ case DC_RSVD_2:
+ case DC_RSVD_3:
+ case DC_RSVD_4:
+ case DC_LINE_CNT:
+ case DC_PAL_ADDRESS:
+ case DC_PAL_DATA:
+ case DC_DFIFO_DIAG:
+ case DC_CFIFO_DIAG:
+ case DC_RSVD_5:
+ /* don't restore these registers */
+ break;
+ default:
+ write_dc(par, i, par->dc[i]);
+ }
+ }
+
+ /* restore the palette */
+ write_dc(par, DC_PAL_ADDRESS, 0);
+ for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+ write_dc(par, DC_PAL_DATA, par->pal[i]);
+}
+
+static void gx_restore_video_proc(struct gxfb_par *par)
+{
+ int i;
+
+ wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
+
+ for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
+ switch (i) {
+ case VP_VCFG:
+ /* don't enable video yet */
+ write_vp(par, i, par->vp[i] & ~VP_VCFG_VID_EN);
+ break;
+
+ case VP_DCFG:
+ /* don't enable CRT yet */
+ write_vp(par, i, par->vp[i] &
+ ~(VP_DCFG_DAC_BL_EN | VP_DCFG_VSYNC_EN |
+ VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
+ break;
+
+ case VP_GAR:
+ case VP_GDR:
+ case VP_RSVD_0:
+ case VP_RSVD_1:
+ case VP_RSVD_2:
+ case VP_RSVD_3:
+ case VP_CRC32:
+ case VP_AWT:
+ case VP_VTM:
+ /* don't restore these registers */
+ break;
+ default:
+ write_vp(par, i, par->vp[i]);
+ }
+ }
+}
+
+static void gx_restore_regs(struct gxfb_par *par)
+{
+ int i;
+
+ gx_set_dotpll((uint32_t) (par->msr.dotpll >> 32));
+ gx_restore_gfx_proc(par);
+ gx_restore_display_ctlr(par);
+ gx_restore_video_proc(par);
+
+ /* Flat Panel */
+ for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
+ if (i != FP_PM && i != FP_RSVD_0)
+ write_fp(par, i, par->fp[i]);
+ }
+}
+
+static void gx_disable_graphics(struct gxfb_par *par)
+{
+ /* shut down the engine */
+ write_vp(par, VP_VCFG, par->vp[VP_VCFG] & ~VP_VCFG_VID_EN);
+ write_vp(par, VP_DCFG, par->vp[VP_DCFG] & ~(VP_DCFG_DAC_BL_EN |
+ VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
+
+ /* turn off the flat panel */
+ write_fp(par, FP_PM, par->fp[FP_PM] & ~FP_PM_P);
+
+ /* turn off display */
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+ write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG] &
+ ~(DC_GENERAL_CFG_VIDE | DC_GENERAL_CFG_ICNE |
+ DC_GENERAL_CFG_CURE | DC_GENERAL_CFG_DFLE));
+ write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG] &
+ ~(DC_DISPLAY_CFG_VDEN | DC_DISPLAY_CFG_GDEN |
+ DC_DISPLAY_CFG_TGEN));
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+static void gx_enable_graphics(struct gxfb_par *par)
+{
+ uint32_t fp;
+
+ fp = read_fp(par, FP_PM);
+ if (par->fp[FP_PM] & FP_PM_P) {
+ /* power on the panel if not already power{ed,ing} on */
+ if (!(fp & (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
+ write_fp(par, FP_PM, par->fp[FP_PM]);
+ } else {
+ /* power down the panel if not already power{ed,ing} down */
+ if (!(fp & (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
+ write_fp(par, FP_PM, par->fp[FP_PM]);
+ }
+
+ /* turn everything on */
+ write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
+ write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
+ write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
+ /* do this last; it will enable the FIFO load */
+ write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
+
+ /* lock the door behind us */
+ write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+int gx_powerdown(struct fb_info *info)
+{
+ struct gxfb_par *par = info->par;
+
+ if (par->powered_down)
+ return 0;
+
+ gx_save_regs(par);
+ gx_disable_graphics(par);
+
+ par->powered_down = 1;
+ return 0;
+}
+
+int gx_powerup(struct fb_info *info)
+{
+ struct gxfb_par *par = info->par;
+
+ if (!par->powered_down)
+ return 0;
+
+ gx_restore_regs(par);
+ gx_enable_graphics(par);
+
+ par->powered_down = 0;
+ return 0;
+}
+
+#endif
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c
index febf09c63492..b8d52a8360db 100644
--- a/drivers/video/geode/video_gx.c
+++ b/drivers/video/geode/video_gx.c
@@ -16,9 +16,9 @@
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/msr.h>
+#include <asm/geode.h>
-#include "geodefb.h"
-#include "video_gx.h"
+#include "gxfb.h"
/*
@@ -117,7 +117,7 @@ static const struct gx_pll_entry gx_pll_table_14MHz[] = {
{ 4357, 0, 0x0000057D }, /* 229.5000 */
};
-static void gx_set_dclk_frequency(struct fb_info *info)
+void gx_set_dclk_frequency(struct fb_info *info)
{
const struct gx_pll_entry *pll_table;
int pll_table_len;
@@ -178,110 +178,116 @@ static void gx_set_dclk_frequency(struct fb_info *info)
static void
gx_configure_tft(struct fb_info *info)
{
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
unsigned long val;
unsigned long fp;
/* Set up the DF pad select MSR */
- rdmsrl(GX_VP_MSR_PAD_SELECT, val);
- val &= ~GX_VP_PAD_SELECT_MASK;
- val |= GX_VP_PAD_SELECT_TFT;
- wrmsrl(GX_VP_MSR_PAD_SELECT, val);
+ rdmsrl(MSR_GX_MSR_PADSEL, val);
+ val &= ~MSR_GX_MSR_PADSEL_MASK;
+ val |= MSR_GX_MSR_PADSEL_TFT;
+ wrmsrl(MSR_GX_MSR_PADSEL, val);
/* Turn off the panel */
- fp = readl(par->vid_regs + GX_FP_PM);
- fp &= ~GX_FP_PM_P;
- writel(fp, par->vid_regs + GX_FP_PM);
+ fp = read_fp(par, FP_PM);
+ fp &= ~FP_PM_P;
+ write_fp(par, FP_PM, fp);
/* Set timing 1 */
- fp = readl(par->vid_regs + GX_FP_PT1);
- fp &= GX_FP_PT1_VSIZE_MASK;
- fp |= info->var.yres << GX_FP_PT1_VSIZE_SHIFT;
- writel(fp, par->vid_regs + GX_FP_PT1);
+ fp = read_fp(par, FP_PT1);
+ fp &= FP_PT1_VSIZE_MASK;
+ fp |= info->var.yres << FP_PT1_VSIZE_SHIFT;
+ write_fp(par, FP_PT1, fp);
/* Timing 2 */
/* Set bits that are always on for TFT */
fp = 0x0F100000;
- /* Add sync polarity */
+ /* Configure sync polarity */
if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
- fp |= GX_FP_PT2_VSP;
+ fp |= FP_PT2_VSP;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
- fp |= GX_FP_PT2_HSP;
+ fp |= FP_PT2_HSP;
- writel(fp, par->vid_regs + GX_FP_PT2);
+ write_fp(par, FP_PT2, fp);
/* Set the dither control */
- writel(0x70, par->vid_regs + GX_FP_DFC);
+ write_fp(par, FP_DFC, FP_DFC_NFI);
/* Enable the FP data and power (in case the BIOS didn't) */
- fp = readl(par->vid_regs + GX_DCFG);
- fp |= GX_DCFG_FP_PWR_EN | GX_DCFG_FP_DATA_EN;
- writel(fp, par->vid_regs + GX_DCFG);
+ fp = read_vp(par, VP_DCFG);
+ fp |= VP_DCFG_FP_PWR_EN | VP_DCFG_FP_DATA_EN;
+ write_vp(par, VP_DCFG, fp);
/* Unblank the panel */
- fp = readl(par->vid_regs + GX_FP_PM);
- fp |= GX_FP_PM_P;
- writel(fp, par->vid_regs + GX_FP_PM);
+ fp = read_fp(par, FP_PM);
+ fp |= FP_PM_P;
+ write_fp(par, FP_PM, fp);
}
-static void gx_configure_display(struct fb_info *info)
+void gx_configure_display(struct fb_info *info)
{
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
u32 dcfg, misc;
- /* Set up the MISC register */
-
- misc = readl(par->vid_regs + GX_MISC);
-
- /* Power up the DAC */
- misc &= ~(GX_MISC_A_PWRDN | GX_MISC_DAC_PWRDN);
-
- /* Disable gamma correction */
- misc |= GX_MISC_GAM_EN;
-
- writel(misc, par->vid_regs + GX_MISC);
-
/* Write the display configuration */
- dcfg = readl(par->vid_regs + GX_DCFG);
+ dcfg = read_vp(par, VP_DCFG);
/* Disable hsync and vsync */
- dcfg &= ~(GX_DCFG_VSYNC_EN | GX_DCFG_HSYNC_EN);
- writel(dcfg, par->vid_regs + GX_DCFG);
+ dcfg &= ~(VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN);
+ write_vp(par, VP_DCFG, dcfg);
/* Clear bits from existing mode. */
- dcfg &= ~(GX_DCFG_CRT_SYNC_SKW_MASK
- | GX_DCFG_CRT_HSYNC_POL | GX_DCFG_CRT_VSYNC_POL
- | GX_DCFG_VSYNC_EN | GX_DCFG_HSYNC_EN);
+ dcfg &= ~(VP_DCFG_CRT_SYNC_SKW
+ | VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL
+ | VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN);
/* Set default sync skew. */
- dcfg |= GX_DCFG_CRT_SYNC_SKW_DFLT;
+ dcfg |= VP_DCFG_CRT_SYNC_SKW_DEFAULT;
/* Enable hsync and vsync. */
- dcfg |= GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN;
+ dcfg |= VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN;
- /* Sync polarities. */
- if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
- dcfg |= GX_DCFG_CRT_HSYNC_POL;
- if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
- dcfg |= GX_DCFG_CRT_VSYNC_POL;
+ misc = read_vp(par, VP_MISC);
+
+ /* Disable gamma correction */
+ misc |= VP_MISC_GAM_EN;
+
+ if (par->enable_crt) {
+
+ /* Power up the CRT DACs */
+ misc &= ~(VP_MISC_APWRDN | VP_MISC_DACPWRDN);
+ write_vp(par, VP_MISC, misc);
+
+ /* Only change the sync polarities if we are running
+ * in CRT mode. The FP polarities will be handled in
+	 * gx_configure_tft */
+ if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
+ dcfg |= VP_DCFG_CRT_HSYNC_POL;
+ if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
+ dcfg |= VP_DCFG_CRT_VSYNC_POL;
+ } else {
+ /* Power down the CRT DACs if in FP mode */
+ misc |= (VP_MISC_APWRDN | VP_MISC_DACPWRDN);
+ write_vp(par, VP_MISC, misc);
+ }
/* Enable the display logic */
/* Set up the DACS to blank normally */
- dcfg |= GX_DCFG_CRT_EN | GX_DCFG_DAC_BL_EN;
+ dcfg |= VP_DCFG_CRT_EN | VP_DCFG_DAC_BL_EN;
/* Enable the external DAC VREF? */
- writel(dcfg, par->vid_regs + GX_DCFG);
+ write_vp(par, VP_DCFG, dcfg);
/* Set up the flat panel (if it is enabled) */
@@ -289,59 +295,55 @@ static void gx_configure_display(struct fb_info *info)
gx_configure_tft(info);
}
-static int gx_blank_display(struct fb_info *info, int blank_mode)
+int gx_blank_display(struct fb_info *info, int blank_mode)
{
- struct geodefb_par *par = info->par;
+ struct gxfb_par *par = info->par;
u32 dcfg, fp_pm;
- int blank, hsync, vsync;
+ int blank, hsync, vsync, crt;
/* CRT power saving modes. */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- blank = 0; hsync = 1; vsync = 1;
+ blank = 0; hsync = 1; vsync = 1; crt = 1;
break;
case FB_BLANK_NORMAL:
- blank = 1; hsync = 1; vsync = 1;
+ blank = 1; hsync = 1; vsync = 1; crt = 1;
break;
case FB_BLANK_VSYNC_SUSPEND:
- blank = 1; hsync = 1; vsync = 0;
+ blank = 1; hsync = 1; vsync = 0; crt = 1;
break;
case FB_BLANK_HSYNC_SUSPEND:
- blank = 1; hsync = 0; vsync = 1;
+ blank = 1; hsync = 0; vsync = 1; crt = 1;
break;
case FB_BLANK_POWERDOWN:
- blank = 1; hsync = 0; vsync = 0;
+ blank = 1; hsync = 0; vsync = 0; crt = 0;
break;
default:
return -EINVAL;
}
- dcfg = readl(par->vid_regs + GX_DCFG);
- dcfg &= ~(GX_DCFG_DAC_BL_EN
- | GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN);
+ dcfg = read_vp(par, VP_DCFG);
+ dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
+ VP_DCFG_CRT_EN);
if (!blank)
- dcfg |= GX_DCFG_DAC_BL_EN;
+ dcfg |= VP_DCFG_DAC_BL_EN;
if (hsync)
- dcfg |= GX_DCFG_HSYNC_EN;
+ dcfg |= VP_DCFG_HSYNC_EN;
if (vsync)
- dcfg |= GX_DCFG_VSYNC_EN;
- writel(dcfg, par->vid_regs + GX_DCFG);
+ dcfg |= VP_DCFG_VSYNC_EN;
+ if (crt)
+ dcfg |= VP_DCFG_CRT_EN;
+ write_vp(par, VP_DCFG, dcfg);
/* Power on/off flat panel. */
if (par->enable_crt == 0) {
- fp_pm = readl(par->vid_regs + GX_FP_PM);
+ fp_pm = read_fp(par, FP_PM);
if (blank_mode == FB_BLANK_POWERDOWN)
- fp_pm &= ~GX_FP_PM_P;
+ fp_pm &= ~FP_PM_P;
else
- fp_pm |= GX_FP_PM_P;
- writel(fp_pm, par->vid_regs + GX_FP_PM);
+ fp_pm |= FP_PM_P;
+ write_fp(par, FP_PM, fp_pm);
}
return 0;
}
-
-struct geode_vid_ops gx_vid_ops = {
- .set_dclk = gx_set_dclk_frequency,
- .configure_display = gx_configure_display,
- .blank_display = gx_blank_display,
-};
diff --git a/drivers/video/geode/video_gx.h b/drivers/video/geode/video_gx.h
deleted file mode 100644
index ce28d8f382dc..000000000000
--- a/drivers/video/geode/video_gx.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Geode GX video device
- *
- * Copyright (C) 2006 Arcom Control Systems Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __VIDEO_GX_H__
-#define __VIDEO_GX_H__
-
-extern struct geode_vid_ops gx_vid_ops;
-
-/* GX Flatpanel control MSR */
-#define GX_VP_MSR_PAD_SELECT 0xC0002011
-#define GX_VP_PAD_SELECT_MASK 0x3FFFFFFF
-#define GX_VP_PAD_SELECT_TFT 0x1FFFFFFF
-
-/* Geode GX video processor registers */
-
-#define GX_DCFG 0x0008
-# define GX_DCFG_CRT_EN 0x00000001
-# define GX_DCFG_HSYNC_EN 0x00000002
-# define GX_DCFG_VSYNC_EN 0x00000004
-# define GX_DCFG_DAC_BL_EN 0x00000008
-# define GX_DCFG_FP_PWR_EN 0x00000040
-# define GX_DCFG_FP_DATA_EN 0x00000080
-# define GX_DCFG_CRT_HSYNC_POL 0x00000100
-# define GX_DCFG_CRT_VSYNC_POL 0x00000200
-# define GX_DCFG_CRT_SYNC_SKW_MASK 0x0001C000
-# define GX_DCFG_CRT_SYNC_SKW_DFLT 0x00010000
-# define GX_DCFG_VG_CK 0x00100000
-# define GX_DCFG_GV_GAM 0x00200000
-# define GX_DCFG_DAC_VREF 0x04000000
-
-/* Geode GX MISC video configuration */
-
-#define GX_MISC 0x50
-#define GX_MISC_GAM_EN 0x00000001
-#define GX_MISC_DAC_PWRDN 0x00000400
-#define GX_MISC_A_PWRDN 0x00000800
-
-/* Geode GX flat panel display control registers */
-
-#define GX_FP_PT1 0x0400
-#define GX_FP_PT1_VSIZE_MASK 0x7FF0000
-#define GX_FP_PT1_VSIZE_SHIFT 16
-
-#define GX_FP_PT2 0x408
-#define GX_FP_PT2_VSP (1 << 23)
-#define GX_FP_PT2_HSP (1 << 22)
-
-#define GX_FP_PM 0x410
-# define GX_FP_PM_P 0x01000000
-
-#define GX_FP_DFC 0x418
-
-/* Geode GX clock control MSRs */
-
-#define MSR_GLCP_SYS_RSTPLL 0x4c000014
-# define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 (0x0000000000000002ull)
-# define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (0x0000000000000004ull)
-# define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (0x0000000000000008ull)
-
-#define MSR_GLCP_DOTPLL 0x4c000015
-# define MSR_GLCP_DOTPLL_DOTRESET (0x0000000000000001ull)
-# define MSR_GLCP_DOTPLL_BYPASS (0x0000000000008000ull)
-# define MSR_GLCP_DOTPLL_LOCK (0x0000000002000000ull)
-
-#endif /* !__VIDEO_GX_H__ */
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index e92337bef50d..564557792bed 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -238,7 +238,7 @@ static int calc_pll(int period_ps, struct gxt4500_par *par)
for (pdiv1 = 1; pdiv1 <= 8; ++pdiv1) {
for (pdiv2 = 1; pdiv2 <= pdiv1; ++pdiv2) {
postdiv = pdiv1 * pdiv2;
- pll_period = (period_ps + postdiv - 1) / postdiv;
+ pll_period = DIV_ROUND_UP(period_ps, postdiv);
/* keep pll in range 350..600 MHz */
if (pll_period < 1666 || pll_period > 2857)
continue;
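/*
 * Editorial note: DIV_ROUND_UP(n, d) from <linux/kernel.h> expands to
 * (((n) + (d) - 1) / (d)), so this hunk is a pure readability change:
 * e.g. DIV_ROUND_UP(1000, 3) == (1000 + 3 - 1) / 3 == 334.
 */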
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 94e0df8a6f60..0b4bffbe67c8 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/video/hecubafb.c -- FB driver for Hecuba controller
+ * linux/drivers/video/hecubafb.c -- FB driver for Hecuba/Apollo controller
*
* Copyright (C) 2006, Jaya Kumar
* This work was sponsored by CIS(M) Sdn Bhd
@@ -17,18 +17,13 @@
* values. There are other commands that the display is capable of,
* beyond the 5 used here but they are more complex.
*
- * This driver is written to be used with the Hecuba display controller
- * board, and tested with the EInk 800x600 display in 1 bit mode.
- * The interface between Hecuba and the host is TTL based GPIO. The
- * GPIO requirements are 8 writable data lines and 6 lines for control.
- * Only 4 of the controls are actually used here but 6 for future use.
- * The driver requires the IO addresses for data and control GPIO at
- * load time. It is also possible to use this display with a standard
- * PC parallel port.
+ * This driver is written to be used with the Hecuba display architecture.
+ * The actual display chip is called Apollo, and the interface electronics
+ * it needs are called Hecuba.
*
- * General notes:
- * - User must set hecubafb_enable=1 to enable it
- * - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
+ * It is intended to be architecture-independent. A board-specific driver
+ * must be used to perform all the physical IO interactions; an example
+ * is provided in n411.c.
*
*/
@@ -47,34 +42,12 @@
#include <linux/list.h>
#include <linux/uaccess.h>
-/* Apollo controller specific defines */
-#define APOLLO_START_NEW_IMG 0xA0
-#define APOLLO_STOP_IMG_DATA 0xA1
-#define APOLLO_DISPLAY_IMG 0xA2
-#define APOLLO_ERASE_DISPLAY 0xA3
-#define APOLLO_INIT_DISPLAY 0xA4
-
-/* Hecuba interface specific defines */
-/* WUP is inverted, CD is inverted, DS is inverted */
-#define HCB_NWUP_BIT 0x01
-#define HCB_NDS_BIT 0x02
-#define HCB_RW_BIT 0x04
-#define HCB_NCD_BIT 0x08
-#define HCB_ACK_BIT 0x80
+#include <video/hecubafb.h>
/* Display specific information */
#define DPY_W 600
#define DPY_H 800
-struct hecubafb_par {
- unsigned long dio_addr;
- unsigned long cio_addr;
- unsigned long c2io_addr;
- unsigned char ctl;
- struct fb_info *info;
- unsigned int irq;
-};
-
static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
.id = "hecubafb",
.type = FB_TYPE_PACKED_PIXELS,
@@ -82,6 +55,7 @@ static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
+ .line_length = DPY_W,
.accel = FB_ACCEL_NONE,
};
@@ -94,136 +68,51 @@ static struct fb_var_screeninfo hecubafb_var __devinitdata = {
.nonstd = 1,
};
-static unsigned long dio_addr;
-static unsigned long cio_addr;
-static unsigned long c2io_addr;
-static unsigned long splashval;
-static unsigned int nosplash;
-static unsigned int hecubafb_enable;
-static unsigned int irq;
-
-static DECLARE_WAIT_QUEUE_HEAD(hecubafb_waitq);
-
-static void hcb_set_ctl(struct hecubafb_par *par)
-{
- outb(par->ctl, par->cio_addr);
-}
-
-static unsigned char hcb_get_ctl(struct hecubafb_par *par)
-{
- return inb(par->c2io_addr);
-}
-
-static void hcb_set_data(struct hecubafb_par *par, unsigned char value)
-{
- outb(value, par->dio_addr);
-}
-
-static int __devinit apollo_init_control(struct hecubafb_par *par)
-{
- unsigned char ctl;
- /* for init, we want the following setup to be set:
- WUP = lo
- ACK = hi
- DS = hi
- RW = hi
- CD = lo
- */
-
- /* write WUP to lo, DS to hi, RW to hi, CD to lo */
- par->ctl = HCB_NWUP_BIT | HCB_RW_BIT | HCB_NCD_BIT ;
- par->ctl &= ~HCB_NDS_BIT;
- hcb_set_ctl(par);
-
- /* check ACK is not lo */
- ctl = hcb_get_ctl(par);
- if ((ctl & HCB_ACK_BIT)) {
- printk(KERN_ERR "Fail because ACK is already low\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-static void hcb_wait_for_ack(struct hecubafb_par *par)
-{
-
- int timeout;
- unsigned char ctl;
-
- timeout=500;
- do {
- ctl = hcb_get_ctl(par);
- if ((ctl & HCB_ACK_BIT))
- return;
- udelay(1);
- } while (timeout--);
- printk(KERN_ERR "timed out waiting for ack\n");
-}
-
-static void hcb_wait_for_ack_clear(struct hecubafb_par *par)
-{
-
- int timeout;
- unsigned char ctl;
-
- timeout=500;
- do {
- ctl = hcb_get_ctl(par);
- if (!(ctl & HCB_ACK_BIT))
- return;
- udelay(1);
- } while (timeout--);
- printk(KERN_ERR "timed out waiting for clear\n");
-}
+/* main hecubafb functions */
static void apollo_send_data(struct hecubafb_par *par, unsigned char data)
{
/* set data */
- hcb_set_data(par, data);
+ par->board->set_data(par, data);
/* set DS low */
- par->ctl |= HCB_NDS_BIT;
- hcb_set_ctl(par);
+ par->board->set_ctl(par, HCB_DS_BIT, 0);
- hcb_wait_for_ack(par);
+ /* wait for ack */
+ par->board->wait_for_ack(par, 0);
/* set DS hi */
- par->ctl &= ~(HCB_NDS_BIT);
- hcb_set_ctl(par);
+ par->board->set_ctl(par, HCB_DS_BIT, 1);
- hcb_wait_for_ack_clear(par);
+ /* wait for ack to clear */
+ par->board->wait_for_ack(par, 1);
}
static void apollo_send_command(struct hecubafb_par *par, unsigned char data)
{
/* command so set CD to high */
- par->ctl &= ~(HCB_NCD_BIT);
- hcb_set_ctl(par);
+ par->board->set_ctl(par, HCB_CD_BIT, 1);
/* actually strobe with command */
apollo_send_data(par, data);
/* clear CD back to low */
- par->ctl |= (HCB_NCD_BIT);
- hcb_set_ctl(par);
+ par->board->set_ctl(par, HCB_CD_BIT, 0);
}
-/* main hecubafb functions */
-
static void hecubafb_dpy_update(struct hecubafb_par *par)
{
int i;
unsigned char *buf = (unsigned char __force *)par->info->screen_base;
- apollo_send_command(par, 0xA0);
+ apollo_send_command(par, APOLLO_START_NEW_IMG);
for (i=0; i < (DPY_W*DPY_H/8); i++) {
apollo_send_data(par, *(buf++));
}
- apollo_send_command(par, 0xA1);
- apollo_send_command(par, 0xA2);
+ apollo_send_command(par, APOLLO_STOP_IMG_DATA);
+ apollo_send_command(par, APOLLO_DISPLAY_IMG);
}
/* this is called back from the deferred io workqueue */
@@ -270,41 +159,43 @@ static void hecubafb_imageblit(struct fb_info *info,
static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long p;
- int err=-EINVAL;
- struct hecubafb_par *par;
- unsigned int xres;
- unsigned int fbmemlength;
+ struct hecubafb_par *par = info->par;
+ unsigned long p = *ppos;
+ void *dst;
+ int err = 0;
+ unsigned long total_size;
- p = *ppos;
- par = info->par;
- xres = info->var.xres;
- fbmemlength = (xres * info->var.yres)/8;
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
- if (p > fbmemlength)
- return -ENOSPC;
+ total_size = info->fix.smem_len;
- err = 0;
- if ((count + p) > fbmemlength) {
- count = fbmemlength - p;
- err = -ENOSPC;
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
}
- if (count) {
- char *base_addr;
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
- base_addr = (char __force *)info->screen_base;
- count -= copy_from_user(base_addr + p, buf, count);
- *ppos += count;
- err = -EFAULT;
+ count = total_size - p;
}
- hecubafb_dpy_update(par);
+ dst = (void __force *) (info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ err = -EFAULT;
- if (count)
- return count;
+ if (!err)
+ *ppos += count;
- return err;
+ hecubafb_dpy_update(par);
+
+ return (err) ? err : count;
}
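/*
 * Editorial note: the checks above mirror the generic fb_write() bounds
 * handling -- an offset past the end of the framebuffer fails with
 * -EFBIG, while a write that would run past the end is truncated to fit
 * and reports -EFBIG or -ENOSPC instead of a short count.
 */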
static struct fb_ops hecubafb_ops = {
@@ -324,11 +215,21 @@ static struct fb_deferred_io hecubafb_defio = {
static int __devinit hecubafb_probe(struct platform_device *dev)
{
struct fb_info *info;
+ struct hecuba_board *board;
int retval = -ENOMEM;
int videomemorysize;
unsigned char *videomemory;
struct hecubafb_par *par;
+ /* pick up board specific routines */
+ board = dev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+	/* take a reference on the board-specific module; bail out if gone */
+ if (!try_module_get(board->owner))
+ return -ENODEV;
+
videomemorysize = (DPY_W*DPY_H)/8;
if (!(videomemory = vmalloc(videomemorysize)))
@@ -338,9 +239,9 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
if (!info)
- goto err;
+ goto err_fballoc;
- info->screen_base = (char __iomem *) videomemory;
+ info->screen_base = (char __force __iomem *)videomemory;
info->fbops = &hecubafb_ops;
info->var = hecubafb_var;
@@ -348,14 +249,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
info->fix.smem_len = videomemorysize;
par = info->par;
par->info = info;
+ par->board = board;
+ par->send_command = apollo_send_command;
+ par->send_data = apollo_send_data;
- if (!dio_addr || !cio_addr || !c2io_addr) {
- printk(KERN_WARNING "no IO addresses supplied\n");
- goto err1;
- }
- par->dio_addr = dio_addr;
- par->cio_addr = cio_addr;
- par->c2io_addr = c2io_addr;
info->flags = FBINFO_FLAG_DEFAULT;
info->fbdefio = &hecubafb_defio;
@@ -363,7 +260,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
retval = register_framebuffer(info);
if (retval < 0)
- goto err1;
+ goto err_fbreg;
platform_set_drvdata(dev, info);
printk(KERN_INFO
@@ -371,25 +268,16 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
info->node, videomemorysize >> 10);
/* this inits the dpy */
- apollo_init_control(par);
-
- apollo_send_command(par, APOLLO_INIT_DISPLAY);
- apollo_send_data(par, 0x81);
-
- /* have to wait while display resets */
- udelay(1000);
-
- /* if we were told to splash the screen, we just clear it */
- if (!nosplash) {
- apollo_send_command(par, APOLLO_ERASE_DISPLAY);
- apollo_send_data(par, splashval);
- }
+ retval = par->board->init(par);
+ if (retval < 0)
+ goto err_fbreg;
return 0;
-err1:
+err_fbreg:
framebuffer_release(info);
-err:
+err_fballoc:
vfree(videomemory);
+ module_put(board->owner);
return retval;
}
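/*
 * Editorial sketch: a minimal board-side provider of the callbacks the
 * probe path above now depends on.  The struct hecuba_board layout is
 * inferred from the call sites in this patch (owner, init, remove,
 * set_ctl, set_data, wait_for_ack); all my_* names are hypothetical,
 * and n411.c in this series is the real example.
 */
static int my_init(struct hecubafb_par *par)
{
	return 0;	/* set up GPIO lines, reset the Apollo chip */
}

static void my_remove(struct hecubafb_par *par)
{
	/* release GPIO lines */
}

static void my_set_data(struct hecubafb_par *par, unsigned char value)
{
	/* drive the 8 data lines with value */
}

static void my_set_ctl(struct hecubafb_par *par, unsigned char bit,
		       unsigned char state)
{
	/* drive one control line (HCB_DS_BIT, HCB_CD_BIT, ...) */
}

static void my_wait_for_ack(struct hecubafb_par *par, int clear)
{
	/* poll ACK until it is asserted (clear == 0) or deasserted */
}

static struct hecuba_board my_board = {
	.owner        = THIS_MODULE,
	.init         = my_init,
	.remove       = my_remove,
	.set_ctl      = my_set_ctl,
	.set_data     = my_set_data,
	.wait_for_ack = my_wait_for_ack,
};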
@@ -398,9 +286,13 @@ static int __devexit hecubafb_remove(struct platform_device *dev)
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
+ struct hecubafb_par *par = info->par;
fb_deferred_io_cleanup(info);
unregister_framebuffer(info);
vfree((void __force *)info->screen_base);
+ if (par->board->remove)
+ par->board->remove(par);
+ module_put(par->board->owner);
framebuffer_release(info);
}
return 0;
@@ -410,62 +302,24 @@ static struct platform_driver hecubafb_driver = {
.probe = hecubafb_probe,
.remove = hecubafb_remove,
.driver = {
+ .owner = THIS_MODULE,
.name = "hecubafb",
},
};
-static struct platform_device *hecubafb_device;
-
static int __init hecubafb_init(void)
{
- int ret;
-
- if (!hecubafb_enable) {
- printk(KERN_ERR "Use hecubafb_enable to enable the device\n");
- return -ENXIO;
- }
-
- ret = platform_driver_register(&hecubafb_driver);
- if (!ret) {
- hecubafb_device = platform_device_alloc("hecubafb", 0);
- if (hecubafb_device)
- ret = platform_device_add(hecubafb_device);
- else
- ret = -ENOMEM;
-
- if (ret) {
- platform_device_put(hecubafb_device);
- platform_driver_unregister(&hecubafb_driver);
- }
- }
- return ret;
-
+ return platform_driver_register(&hecubafb_driver);
}
static void __exit hecubafb_exit(void)
{
- platform_device_unregister(hecubafb_device);
platform_driver_unregister(&hecubafb_driver);
}
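/*
 * Editorial note: with the module no longer creating its own platform
 * device, a board file is expected to instantiate one and attach its
 * struct hecuba_board through platform_data, roughly:
 *
 *	pdev = platform_device_alloc("hecubafb", -1);
 *	pdev->dev.platform_data = &my_board;
 *	platform_device_add(pdev);
 *
 * (sketch only, error handling omitted; n411.c is the real example)
 */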
-module_param(nosplash, uint, 0);
-MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
-module_param(hecubafb_enable, uint, 0);
-MODULE_PARM_DESC(hecubafb_enable, "Enable communication with Hecuba board");
-module_param(dio_addr, ulong, 0);
-MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
-module_param(cio_addr, ulong, 0);
-MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
-module_param(c2io_addr, ulong, 0);
-MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
-module_param(splashval, ulong, 0);
-MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
-module_param(irq, uint, 0);
-MODULE_PARM_DESC(irq, "IRQ for the Hecuba board");
-
module_init(hecubafb_init);
module_exit(hecubafb_exit);
-MODULE_DESCRIPTION("fbdev driver for Hecuba board");
+MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index 3ab91bf21576..15d50b9906ce 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -1151,8 +1151,10 @@ imsttfb_load_cursor_image(struct imstt_par *par, int width, int height, __u8 fgc
par->cmap_regs[TVPCRDAT] = 0xff; eieio();
}
par->cmap_regs[TVPCADRW] = 0x00; eieio();
- for (x = 0; x < 12; x++)
- par->cmap_regs[TVPCDATA] = fgc; eieio();
+ for (x = 0; x < 12; x++) {
+ par->cmap_regs[TVPCDATA] = fgc;
+ eieio();
+ }
}
return 1;
}
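/*
 * Editorial note on the fix above: without braces, only the assignment
 * was inside the for loop, so the eieio() I/O barrier on PowerPC ran
 * once after all twelve writes instead of after each one.  The braces
 * restore the intended write/barrier pairing.
 */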
@@ -1476,7 +1478,7 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dp = pci_device_to_OF_node(pdev);
if(dp)
- printk(KERN_INFO "%s: OF name %s\n",__FUNCTION__, dp->name);
+ printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name);
else
printk(KERN_ERR "imsttfb: no OF node for pci device\n");
#endif /* CONFIG_PPC_OF */
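/*
 * Editorial note: __func__ is the C99 predefined identifier for the
 * enclosing function's name, while __FUNCTION__ is the older GCC
 * spelling.  The conversions in this file and the ones below are
 * behavior-neutral:
 *
 *	pr_debug("%s\n", __func__);	prints e.g. "imxfb_suspend"
 */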
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 11609552a387..94e4d3ac1a05 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -415,7 +415,7 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
{
struct imxfb_info *fbi = platform_get_drvdata(dev);
- pr_debug("%s\n",__FUNCTION__);
+ pr_debug("%s\n",__func__);
imxfb_disable_controller(fbi);
return 0;
@@ -424,7 +424,7 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
static int imxfb_resume(struct platform_device *dev)
{
struct imxfb_info *fbi = platform_get_drvdata(dev);
- pr_debug("%s\n",__FUNCTION__);
+ pr_debug("%s\n",__func__);
imxfb_enable_controller(fbi);
return 0;
@@ -440,7 +440,7 @@ static int __init imxfb_init_fbinfo(struct device *dev)
struct fb_info *info = dev_get_drvdata(dev);
struct imxfb_info *fbi = info->par;
- pr_debug("%s\n",__FUNCTION__);
+ pr_debug("%s\n",__func__);
info->pseudo_palette = kmalloc( sizeof(u32) * 16, GFP_KERNEL);
if (!info->pseudo_palette)
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 836796177942..3325fbd68ab3 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -12,9 +12,9 @@
#endif
/*** Version/name ***/
-#define INTELFB_VERSION "0.9.4"
+#define INTELFB_VERSION "0.9.5"
#define INTELFB_MODULE_NAME "intelfb"
-#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM"
+#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM"
/*** Debug/feature defines ***/
@@ -58,6 +58,8 @@
#define PCI_DEVICE_ID_INTEL_915GM 0x2592
#define PCI_DEVICE_ID_INTEL_945G 0x2772
#define PCI_DEVICE_ID_INTEL_945GM 0x27A2
+#define PCI_DEVICE_ID_INTEL_965G 0x29A2
+#define PCI_DEVICE_ID_INTEL_965GM 0x2A02
/* Size of MMIO region */
#define INTEL_REG_SIZE 0x80000
@@ -158,6 +160,8 @@ enum intel_chips {
INTEL_915GM,
INTEL_945G,
INTEL_945GM,
+ INTEL_965G,
+ INTEL_965GM,
};
struct intelfb_hwstate {
@@ -358,7 +362,9 @@ struct intelfb_info {
#define IS_I9XX(dinfo) (((dinfo)->chipset == INTEL_915G) || \
((dinfo)->chipset == INTEL_915GM) || \
((dinfo)->chipset == INTEL_945G) || \
- ((dinfo)->chipset==INTEL_945GM))
+ ((dinfo)->chipset == INTEL_945GM) || \
+ ((dinfo)->chipset == INTEL_965G) || \
+ ((dinfo)->chipset == INTEL_965GM))
#ifndef FBIO_WAITFORVSYNC
#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 94c08bb5acf1..ca95f09d8b43 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -169,6 +169,8 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo)
/* has some LVDS + tv-out */
case INTEL_945G:
case INTEL_945GM:
+ case INTEL_965G:
+ case INTEL_965GM:
/* SDVO ports have a single control bus - 2 devices */
dinfo->output[i].type = INTELFB_OUTPUT_SDVO;
intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus,
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 481d58f7535d..e44303f9bc52 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -2,7 +2,7 @@
* intelfb
*
* Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
- * 945G/945GM integrated graphics chips.
+ * 945G/945GM/965G/965GM integrated graphics chips.
*
* Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
* 2004 Sylvain Meyer
@@ -99,6 +99,9 @@
* Add vram option to reserve more memory than stolen by BIOS
* Fix intelfbhw_pan_display typo
* Add __initdata annotations
+ *
+ * 04/2008 - Version 0.9.5
+ * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
*/
#include <linux/module.h>
@@ -180,6 +183,8 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
{ 0, }
};
@@ -549,7 +554,10 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
if ((ent->device == PCI_DEVICE_ID_INTEL_915G) ||
(ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
(ent->device == PCI_DEVICE_ID_INTEL_945G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945GM)) {
+ (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
aperture_bar = 2;
mmio_bar = 0;
}
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index fa1fff553565..8e6d6a4db0ad 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -143,6 +143,18 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo)
dinfo->mobile = 1;
dinfo->pll_index = PLLS_I9xx;
return 0;
+ case PCI_DEVICE_ID_INTEL_965G:
+ dinfo->name = "Intel(R) 965G";
+ dinfo->chipset = INTEL_965G;
+ dinfo->mobile = 0;
+ dinfo->pll_index = PLLS_I9xx;
+ return 0;
+ case PCI_DEVICE_ID_INTEL_965GM:
+ dinfo->name = "Intel(R) 965GM";
+ dinfo->chipset = INTEL_965GM;
+ dinfo->mobile = 1;
+ dinfo->pll_index = PLLS_I9xx;
+ return 0;
default:
return 1;
}
@@ -174,7 +186,9 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
case PCI_DEVICE_ID_INTEL_915GM:
case PCI_DEVICE_ID_INTEL_945G:
case PCI_DEVICE_ID_INTEL_945GM:
- /* 915 and 945 chipsets support a 256MB aperture.
+ case PCI_DEVICE_ID_INTEL_965G:
+ case PCI_DEVICE_ID_INTEL_965GM:
+ /* 915, 945 and 965 chipsets support a 256MB aperture.
	   Aperture size is determined by inspecting the
base address of the aperture. */
if (pci_resource_start(pdev, 2) & 0x08000000)
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index c4b570b4a4df..0ce3b0a89798 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -37,7 +37,7 @@ static void DAC1064_calcclock(CPMINFO unsigned int freq, unsigned int fmax, unsi
unsigned int fvco;
unsigned int p;
- DBG(__FUNCTION__)
+ DBG(__func__)
/* only for devices older than G450 */
@@ -83,7 +83,7 @@ static const unsigned char MGA1064_DAC[] = {
static void DAC1064_setpclk(WPMINFO unsigned long fout) {
unsigned int m, n, p;
- DBG(__FUNCTION__)
+ DBG(__func__)
DAC1064_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
ACCESS_FBINFO(hw).DACclk[0] = m;
@@ -95,7 +95,7 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
u_int32_t mx;
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(devflags.noinit)) {
/* read MCLK and give up... */
@@ -338,7 +338,7 @@ void DAC1064_global_restore(WPMINFO2) {
static int DAC1064_init_1(WPMINFO struct my_timming* m) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
memcpy(hw->DACreg, MGA1064_DAC, sizeof(MGA1064_DAC_regs));
switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
@@ -374,7 +374,7 @@ static int DAC1064_init_1(WPMINFO struct my_timming* m) {
static int DAC1064_init_2(WPMINFO struct my_timming* m) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(fbcon).var.bits_per_pixel > 16) { /* 256 entries */
int i;
@@ -418,7 +418,7 @@ static void DAC1064_restore_1(WPMINFO2) {
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -448,7 +448,7 @@ static void DAC1064_restore_2(WPMINFO2) {
unsigned int i;
#endif
- DBG(__FUNCTION__)
+ DBG(__func__)
#ifdef DEBUG
dprintk(KERN_DEBUG "DAC1064regs ");
@@ -521,7 +521,7 @@ static struct matrox_altout g450out = {
static int MGA1064_init(WPMINFO struct my_timming* m) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (DAC1064_init_1(PMINFO m)) return 1;
if (matroxfb_vgaHWinit(PMINFO m)) return 1;
@@ -543,7 +543,7 @@ static int MGA1064_init(WPMINFO struct my_timming* m) {
static int MGAG100_init(WPMINFO struct my_timming* m) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (DAC1064_init_1(PMINFO m)) return 1;
hw->MXoptionReg &= ~0x2000;
@@ -565,7 +565,7 @@ static int MGAG100_init(WPMINFO struct my_timming* m) {
#ifdef CONFIG_FB_MATROX_MYSTIQUE
static void MGA1064_ramdac_init(WPMINFO2) {
- DBG(__FUNCTION__)
+ DBG(__func__)
/* ACCESS_FBINFO(features.DAC1064.vco_freq_min) = 120000; */
ACCESS_FBINFO(features.pll.vco_freq_min) = 62000;
@@ -594,7 +594,7 @@ static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
int selClk;
int clk;
- DBG(__FUNCTION__)
+ DBG(__func__)
outDAC1064(PMINFO M1064_XPIXCLKCTRL, inDAC1064(PMINFO M1064_XPIXCLKCTRL) | M1064_XPIXCLKCTRL_DIS |
M1064_XPIXCLKCTRL_PLL_UP);
@@ -636,7 +636,7 @@ static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
static void MGAG100_setPixClock(CPMINFO int flags, int freq) {
unsigned int m, n, p;
- DBG(__FUNCTION__)
+ DBG(__func__)
DAC1064_calcclock(PMINFO freq, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
MGAG100_progPixClock(PMINFO flags, m, n, p);
@@ -650,7 +650,7 @@ static int MGA1064_preinit(WPMINFO2) {
2048, 0};
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
/* ACCESS_FBINFO(capable.cfb4) = 0; ... preinitialized by 0 */
ACCESS_FBINFO(capable.text) = 1;
@@ -683,7 +683,7 @@ static int MGA1064_preinit(WPMINFO2) {
static void MGA1064_reset(WPMINFO2) {
- DBG(__FUNCTION__);
+ DBG(__func__);
MGA1064_ramdac_init(PMINFO2);
}
@@ -819,7 +819,7 @@ static int MGAG100_preinit(WPMINFO2) {
u_int32_t q;
#endif
- DBG(__FUNCTION__)
+ DBG(__func__)
/* there are some instabilities if in_div > 19 && vco < 61000 */
if (ACCESS_FBINFO(devflags.g450dac)) {
@@ -956,7 +956,7 @@ static void MGAG100_reset(WPMINFO2) {
u_int8_t b;
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
{
#ifdef G100_BROKEN_IBM_82351
@@ -1015,7 +1015,7 @@ static void MGA1064_restore(WPMINFO2) {
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -1041,7 +1041,7 @@ static void MGAG100_restore(WPMINFO2) {
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 9445cdb759b1..13524821e242 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -283,7 +283,7 @@ static int Ti3026_calcclock(CPMINFO unsigned int freq, unsigned int fmax, int* i
unsigned int fvco;
unsigned int lin, lfeed, lpost;
- DBG(__FUNCTION__)
+ DBG(__func__)
fvco = PLL_calcclock(PMINFO freq, fmax, &lin, &lfeed, &lpost);
fvco >>= (*post = lpost);
@@ -297,7 +297,7 @@ static int Ti3026_setpclk(WPMINFO int clk) {
unsigned int pixfeed, pixin, pixpost;
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
f_pll = Ti3026_calcclock(PMINFO clk, ACCESS_FBINFO(max_pixel_clock), &pixin, &pixfeed, &pixpost);
@@ -365,7 +365,7 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
u_int8_t muxctrl = isInterleave(MINFO) ? TVP3026_XMUXCTRL_MEMORY_64BIT : TVP3026_XMUXCTRL_MEMORY_32BIT;
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
@@ -440,7 +440,7 @@ static void ti3026_setMCLK(WPMINFO int fout){
unsigned int rfhcnt, mclk_ctl;
int tmout;
- DBG(__FUNCTION__)
+ DBG(__func__)
f_pll = Ti3026_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &mclk_n, &mclk_m, &mclk_p);
@@ -534,7 +534,7 @@ static void ti3026_setMCLK(WPMINFO int fout){
static void ti3026_ramdac_init(WPMINFO2) {
- DBG(__FUNCTION__)
+ DBG(__func__)
ACCESS_FBINFO(features.pll.vco_freq_min) = 110000;
ACCESS_FBINFO(features.pll.ref_freq) = 114545;
@@ -554,7 +554,7 @@ static void Ti3026_restore(WPMINFO2) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
#ifdef DEBUG
dprintk(KERN_INFO "EXTVGA regs: ");
@@ -662,7 +662,7 @@ static void Ti3026_restore(WPMINFO2) {
static void Ti3026_reset(WPMINFO2) {
- DBG(__FUNCTION__)
+ DBG(__func__)
ti3026_ramdac_init(PMINFO2);
}
@@ -680,7 +680,7 @@ static int Ti3026_preinit(WPMINFO2) {
2048, 0};
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
ACCESS_FBINFO(millenium) = 1;
ACCESS_FBINFO(milleniumII) = (ACCESS_FBINFO(pcidev)->device != PCI_DEVICE_ID_MATROX_MIL);
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index 3660d2673bdc..9c3aeee1cc4f 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -113,7 +113,7 @@ void matrox_cfbX_init(WPMINFO2) {
u_int32_t mopmode;
int accel;
- DBG(__FUNCTION__)
+ DBG(__func__)
mpitch = ACCESS_FBINFO(fbcon).var.xres_virtual;
@@ -199,7 +199,7 @@ static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx
int start, end;
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -235,7 +235,7 @@ static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, in
int start, end;
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -287,7 +287,7 @@ static void matroxfb_accel_clear(WPMINFO u_int32_t color, int sy, int sx, int he
int width) {
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -315,7 +315,7 @@ static void matroxfb_cfb4_clear(WPMINFO u_int32_t bgx, int sy, int sx, int heigh
int whattodo;
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
CRITBEGIN
@@ -388,7 +388,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
int easy;
CRITFLAGS
- DBG_HEAVY(__FUNCTION__);
+ DBG_HEAVY(__func__);
step = (width + 7) >> 3;
charcell = height * step;
@@ -469,7 +469,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* image) {
MINFO_FROM_INFO(info);
- DBG_HEAVY(__FUNCTION__);
+ DBG_HEAVY(__func__);
if (image->depth == 1) {
u_int32_t fgx, bgx;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index b25972ac6eeb..54e82f35353d 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -312,7 +312,7 @@ static void matrox_pan_var(WPMINFO struct fb_var_screeninfo *var) {
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(dead))
return;
@@ -392,7 +392,7 @@ static int matroxfb_open(struct fb_info *info, int user)
{
MINFO_FROM_INFO(info);
- DBG_LOOP(__FUNCTION__)
+ DBG_LOOP(__func__)
if (ACCESS_FBINFO(dead)) {
return -ENXIO;
@@ -408,7 +408,7 @@ static int matroxfb_release(struct fb_info *info, int user)
{
MINFO_FROM_INFO(info);
- DBG_LOOP(__FUNCTION__)
+ DBG_LOOP(__func__)
if (user) {
if (0 == --ACCESS_FBINFO(userusecount)) {
@@ -425,7 +425,7 @@ static int matroxfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info* info) {
MINFO_FROM_INFO(info);
- DBG(__FUNCTION__)
+ DBG(__func__)
matrox_pan_var(PMINFO var);
return 0;
@@ -434,7 +434,7 @@ static int matroxfb_pan_display(struct fb_var_screeninfo *var,
static int matroxfb_get_final_bppShift(CPMINFO int bpp) {
int bppshft2;
- DBG(__FUNCTION__)
+ DBG(__func__)
bppshft2 = bpp;
if (!bppshft2) {
@@ -451,7 +451,7 @@ static int matroxfb_test_and_set_rounding(CPMINFO int xres, int bpp) {
int over;
int rounding;
- DBG(__FUNCTION__)
+ DBG(__func__)
switch (bpp) {
case 0: return xres;
@@ -482,7 +482,7 @@ static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
const int* width;
int xres_new;
- DBG(__FUNCTION__)
+ DBG(__func__)
if (!bpp) return xres;
@@ -504,7 +504,7 @@ static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
static int matroxfb_get_cmap_len(struct fb_var_screeninfo *var) {
- DBG(__FUNCTION__)
+ DBG(__func__)
switch (var->bits_per_pixel) {
case 4:
@@ -548,7 +548,7 @@ static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visua
unsigned int vramlen;
unsigned int memlen;
- DBG(__FUNCTION__)
+ DBG(__func__)
switch (bpp) {
case 4: if (!ACCESS_FBINFO(capable.cfb4)) return -EINVAL;
@@ -648,7 +648,7 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
struct matrox_fb_info* minfo = container_of(fb_info, struct matrox_fb_info, fbcon);
#endif
- DBG(__FUNCTION__)
+ DBG(__func__)
/*
* Set a single color register. The values supplied are
@@ -707,7 +707,7 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void matroxfb_init_fix(WPMINFO2)
{
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
- DBG(__FUNCTION__)
+ DBG(__func__)
strcpy(fix->id,"MATROX");
@@ -722,7 +722,7 @@ static void matroxfb_init_fix(WPMINFO2)
static void matroxfb_update_fix(WPMINFO2)
{
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
- DBG(__FUNCTION__)
+ DBG(__func__)
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
@@ -753,7 +753,7 @@ static int matroxfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var;
MINFO_FROM_INFO(info);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(dead)) {
return -ENXIO;
@@ -876,7 +876,7 @@ static int matroxfb_ioctl(struct fb_info *info,
void __user *argp = (void __user *)arg;
MINFO_FROM_INFO(info);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(dead)) {
return -ENXIO;
@@ -1175,7 +1175,7 @@ static int matroxfb_blank(int blank, struct fb_info *info)
CRITFLAGS
MINFO_FROM_INFO(info);
- DBG(__FUNCTION__)
+ DBG(__func__)
if (ACCESS_FBINFO(dead))
return 1;
@@ -1287,7 +1287,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
unsigned char bytes[32];
unsigned char* tmp;
- DBG(__FUNCTION__)
+ DBG(__func__)
vm = ACCESS_FBINFO(video.vbase);
maxSize &= ~0x1FFFFF; /* must be X*2MB (really it must be 2 or X*4MB) */
@@ -1593,7 +1593,7 @@ static int initMatrox2(WPMINFO struct board* b){
{ },
};
- DBG(__FUNCTION__)
+ DBG(__func__)
/* set default values... */
vesafb_defined.accel_flags = FB_ACCELF_TEXT;
@@ -2006,7 +2006,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
#ifndef CONFIG_FB_MATROX_MULTIHEAD
static int registered = 0;
#endif
- DBG(__FUNCTION__)
+ DBG(__func__)
svid = pdev->subsystem_vendor;
sid = pdev->subsystem_device;
@@ -2301,7 +2301,7 @@ static void __exit matrox_done(void) {
static int __init matroxfb_setup(char *options) {
char *this_opt;
- DBG(__FUNCTION__)
+ DBG(__func__)
if (!options || !*options)
return 0;
@@ -2444,7 +2444,7 @@ static int __init matroxfb_init(void)
char *option = NULL;
int err = 0;
- DBG(__FUNCTION__)
+ DBG(__func__)
if (fb_get_options("matroxfb", &option))
return -ENODEV;
@@ -2556,7 +2556,7 @@ MODULE_PARM_DESC(cmode, "Specify the video depth that should be used (8bit defau
int __init init_module(void){
- DBG(__FUNCTION__)
+ DBG(__func__)
if (disabled)
return -ENXIO;
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index a6ab5b6a58d0..7ac4c5f6145d 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -420,7 +420,7 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
MINFO_FROM(m2info->primary_dev);
- DBG(__FUNCTION__)
+ DBG(__func__)
switch (cmd) {
case FBIOGET_VBLANK:
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 0cd58f84fb46..89da27bd5c49 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -220,7 +220,7 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
unsigned int scrlen;
unsigned int fmax;
- DBG(__FUNCTION__)
+ DBG(__func__)
scrlen = htotal * (vtotal - 1);
fwant = htotal * vtotal;
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index ab7fb50bc1de..aaa3e538e5da 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -90,13 +90,13 @@
#include <linux/matroxfb.h>
void matroxfb_DAC_out(CPMINFO int reg, int val) {
- DBG_REG(__FUNCTION__)
+ DBG_REG(__func__)
mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val);
}
int matroxfb_DAC_in(CPMINFO int reg) {
- DBG_REG(__FUNCTION__)
+ DBG_REG(__func__)
mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
return mga_inb(M_RAMDAC_BASE+M_X_DATAREG);
}
@@ -104,7 +104,7 @@ int matroxfb_DAC_in(CPMINFO int reg) {
void matroxfb_var2my(struct fb_var_screeninfo* var, struct my_timming* mt) {
unsigned int pixclock = var->pixclock;
- DBG(__FUNCTION__)
+ DBG(__func__)
if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */
mt->pixclock = 1000000000 / pixclock;
@@ -131,7 +131,7 @@ int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int f
unsigned int fwant;
unsigned int p;
- DBG(__FUNCTION__)
+ DBG(__func__)
fwant = freq;
@@ -192,7 +192,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
int i;
struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
- DBG(__FUNCTION__)
+ DBG(__func__)
hw->SEQ[0] = 0x00;
hw->SEQ[1] = 0x01; /* or 0x09 */
@@ -336,7 +336,7 @@ void matroxfb_vgaHWrestore(WPMINFO2) {
struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
CRITFLAGS
- DBG(__FUNCTION__)
+ DBG(__func__)
dprintk(KERN_INFO "MiscOutReg: %02X\n", hw->MiscOutReg);
dprintk(KERN_INFO "SEQ regs: ");
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index e9a89fd82757..249791286367 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -13,12 +13,10 @@
* Corporation. http://support.eink.com/community
*
* This driver is written to be used with the Metronome display controller.
- * It was tested with an E-Ink 800x600 Vizplex EPD on a Gumstix Connex board
- * using the Lyre interface board.
+ * It is intended to be architecture-independent. A board-specific driver
+ * must be used to perform all the physical I/O interactions; an example
+ * is provided in am200epd.c.
*
- * General notes:
- * - User must set metronomefb_enable=1 to enable it.
- * - See Documentation/fb/metronomefb.txt for how metronome works.
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -38,9 +36,11 @@
#include <linux/uaccess.h>
#include <linux/irq.h>
-#include <asm/arch/pxa-regs.h>
+#include <video/metronomefb.h>
+
#include <asm/unaligned.h>
+
#define DEBUG 1
#ifdef DEBUG
#define DPRINTK(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a)
@@ -53,35 +53,6 @@
#define DPY_W 832
#define DPY_H 622
-struct metromem_desc {
- u32 mFDADR0;
- u32 mFSADR0;
- u32 mFIDR0;
- u32 mLDCMD0;
-};
-
-struct metromem_cmd {
- u16 opcode;
- u16 args[((64-2)/2)];
- u16 csum;
-};
-
-struct metronomefb_par {
- unsigned char *metromem;
- struct metromem_desc *metromem_desc;
- struct metromem_cmd *metromem_cmd;
- unsigned char *metromem_wfm;
- unsigned char *metromem_img;
- u16 *metromem_img_csum;
- u16 *csum_table;
- int metromemsize;
- dma_addr_t metromem_dma;
- dma_addr_t metromem_desc_dma;
- struct fb_info *info;
- wait_queue_head_t waitq;
- u8 frame_count;
-};
-
/* frame differs from image. frame includes non-visible pixels */
struct epd_frame {
int fw; /* frame width */
@@ -120,8 +91,7 @@ static struct fb_var_screeninfo metronomefb_var __devinitdata = {
.transp = { 0, 0, 0 },
};
-static unsigned int metronomefb_enable;
-
+/* the waveform structure that comes from userspace via the firmware loader */
struct waveform_hdr {
u8 stuff[32];
@@ -301,165 +271,6 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
return 0;
}
-/* register offsets for gpio control */
-#define LED_GPIO_PIN 51
-#define STDBY_GPIO_PIN 48
-#define RST_GPIO_PIN 49
-#define RDY_GPIO_PIN 32
-#define ERR_GPIO_PIN 17
-#define PCBPWR_GPIO_PIN 16
-
-#define AF_SEL_GPIO_N 0x3
-#define GAFR0_U_OFFSET(pin) ((pin - 16) * 2)
-#define GAFR1_L_OFFSET(pin) ((pin - 32) * 2)
-#define GAFR1_U_OFFSET(pin) ((pin - 48) * 2)
-#define GPDR1_OFFSET(pin) (pin - 32)
-#define GPCR1_OFFSET(pin) (pin - 32)
-#define GPSR1_OFFSET(pin) (pin - 32)
-#define GPCR0_OFFSET(pin) (pin)
-#define GPSR0_OFFSET(pin) (pin)
-
-static void metronome_set_gpio_output(int pin, int val)
-{
- u8 index;
-
- index = pin >> 4;
-
- switch (index) {
- case 1:
- if (val)
- GPSR0 |= (1 << GPSR0_OFFSET(pin));
- else
- GPCR0 |= (1 << GPCR0_OFFSET(pin));
- break;
- case 2:
- break;
- case 3:
- if (val)
- GPSR1 |= (1 << GPSR1_OFFSET(pin));
- else
- GPCR1 |= (1 << GPCR1_OFFSET(pin));
- break;
- default:
- printk(KERN_ERR "unimplemented\n");
- }
-}
-
-static void __devinit metronome_init_gpio_pin(int pin, int dir)
-{
- u8 index;
- /* dir 0 is output, 1 is input
- - do 2 things here:
- - set gpio alternate function to standard gpio
- - set gpio direction to input or output */
-
- index = pin >> 4;
- switch (index) {
- case 1:
- GAFR0_U &= ~(AF_SEL_GPIO_N << GAFR0_U_OFFSET(pin));
-
- if (dir)
- GPDR0 &= ~(1 << pin);
- else
- GPDR0 |= (1 << pin);
- break;
- case 2:
- GAFR1_L &= ~(AF_SEL_GPIO_N << GAFR1_L_OFFSET(pin));
-
- if (dir)
- GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
- else
- GPDR1 |= (1 << GPDR1_OFFSET(pin));
- break;
- case 3:
- GAFR1_U &= ~(AF_SEL_GPIO_N << GAFR1_U_OFFSET(pin));
-
- if (dir)
- GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
- else
- GPDR1 |= (1 << GPDR1_OFFSET(pin));
- break;
- default:
- printk(KERN_ERR "unimplemented\n");
- }
-}
-
-static void __devinit metronome_init_gpio_regs(void)
-{
- metronome_init_gpio_pin(LED_GPIO_PIN, 0);
- metronome_set_gpio_output(LED_GPIO_PIN, 0);
-
- metronome_init_gpio_pin(STDBY_GPIO_PIN, 0);
- metronome_set_gpio_output(STDBY_GPIO_PIN, 0);
-
- metronome_init_gpio_pin(RST_GPIO_PIN, 0);
- metronome_set_gpio_output(RST_GPIO_PIN, 0);
-
- metronome_init_gpio_pin(RDY_GPIO_PIN, 1);
-
- metronome_init_gpio_pin(ERR_GPIO_PIN, 1);
-
- metronome_init_gpio_pin(PCBPWR_GPIO_PIN, 0);
- metronome_set_gpio_output(PCBPWR_GPIO_PIN, 0);
-}
-
-static void metronome_disable_lcd_controller(struct metronomefb_par *par)
-{
- LCSR = 0xffffffff; /* Clear LCD Status Register */
- LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
-
- /* we reset and just wait for things to settle */
- msleep(200);
-}
-
-static void metronome_enable_lcd_controller(struct metronomefb_par *par)
-{
- LCSR = 0xffffffff;
- FDADR0 = par->metromem_desc_dma;
- LCCR0 |= LCCR0_ENB;
-}
-
-static void __devinit metronome_init_lcdc_regs(struct metronomefb_par *par)
-{
- /* here we do:
- - disable the lcd controller
- - setup lcd control registers
- - setup dma descriptor
- - reenable lcd controller
- */
-
- /* disable the lcd controller */
- metronome_disable_lcd_controller(par);
-
- /* setup lcd control registers */
- LCCR0 = LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_PAS
- | LCCR0_QDM | LCCR0_BM | LCCR0_OUM;
-
- LCCR1 = (epd_frame_table[0].fw/2 - 1) /* pixels per line */
- | (27 << 10) /* hsync pulse width - 1 */
- | (33 << 16) /* eol pixel count */
- | (33 << 24); /* bol pixel count */
-
- LCCR2 = (epd_frame_table[0].fh - 1) /* lines per panel */
- | (24 << 10) /* vsync pulse width - 1 */
- | (2 << 16) /* eof pixel count */
- | (0 << 24); /* bof pixel count */
-
- LCCR3 = 2 /* pixel clock divisor */
- | (24 << 8) /* AC Bias pin freq */
- | LCCR3_16BPP /* BPP */
- | LCCR3_PCP; /* PCP falling edge */
-
- /* setup dma descriptor */
- par->metromem_desc->mFDADR0 = par->metromem_desc_dma;
- par->metromem_desc->mFSADR0 = par->metromem_dma;
- par->metromem_desc->mFIDR0 = 0;
- par->metromem_desc->mLDCMD0 = epd_frame_table[0].fw
- * epd_frame_table[0].fh;
- /* reenable lcd controller */
- metronome_enable_lcd_controller(par);
-}
-
static int metronome_display_cmd(struct metronomefb_par *par)
{
int i;
@@ -493,8 +304,7 @@ static int metronome_display_cmd(struct metronomefb_par *par)
par->metromem_cmd->csum = cs;
par->metromem_cmd->opcode = opcode; /* display cmd */
- i = wait_event_interruptible_timeout(par->waitq, (GPLR1 & 0x01), HZ);
- return i;
+ return par->board->met_wait_event_intr(par);
}
static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
@@ -518,13 +328,12 @@ static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
par->metromem_cmd->csum = cs;
msleep(1);
- metronome_set_gpio_output(RST_GPIO_PIN, 1);
+ par->board->set_rst(par, 1);
msleep(1);
- metronome_set_gpio_output(STDBY_GPIO_PIN, 1);
+ par->board->set_stdby(par, 1);
- i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
- return i;
+ return par->board->met_wait_event(par);
}
static int __devinit metronome_config_cmd(struct metronomefb_par *par)
@@ -569,8 +378,7 @@ static int __devinit metronome_config_cmd(struct metronomefb_par *par)
par->metromem_cmd->csum = cs;
par->metromem_cmd->opcode = 0xCC10; /* config cmd */
- i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
- return i;
+ return par->board->met_wait_event(par);
}
static int __devinit metronome_init_cmd(struct metronomefb_par *par)
@@ -596,16 +404,19 @@ static int __devinit metronome_init_cmd(struct metronomefb_par *par)
par->metromem_cmd->csum = cs;
par->metromem_cmd->opcode = 0xCC20; /* init cmd */
- i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
- return i;
+ return par->board->met_wait_event(par);
}
static int __devinit metronome_init_regs(struct metronomefb_par *par)
{
int res;
- metronome_init_gpio_regs();
- metronome_init_lcdc_regs(par);
+ par->board->init_gpio_regs(par);
+
+ par->board->init_lcdc_regs(par);
+
+ /* now that the LCD is set up, set up the DMA descriptor */
+ par->board->post_dma_setup(par);
res = metronome_powerup_cmd(par);
if (res)
@@ -616,8 +427,6 @@ static int __devinit metronome_init_regs(struct metronomefb_par *par)
return res;
res = metronome_init_cmd(par);
- if (res)
- return res;
return res;
}
@@ -632,7 +441,7 @@ static void metronomefb_dpy_update(struct metronomefb_par *par)
cksum = calc_img_cksum((u16 *) par->metromem_img,
(epd_frame_table[0].fw * DPY_H)/2);
- *((u16 *) (par->metromem_img) +
+ *((u16 *)(par->metromem_img) +
(epd_frame_table[0].fw * DPY_H)/2) = cksum;
metronome_display_cmd(par);
}
@@ -641,8 +450,8 @@ static u16 metronomefb_dpy_update_page(struct metronomefb_par *par, int index)
{
int i;
u16 csum = 0;
- u16 *buf = (u16 __force *) (par->info->screen_base + index);
- u16 *img = (u16 *) (par->metromem_img + index);
+ u16 *buf = (u16 __force *)(par->info->screen_base + index);
+ u16 *img = (u16 *)(par->metromem_img + index);
/* swizzle from vm to metromem and recalc cksum at the same time */
for (i = 0; i < PAGE_SIZE/2; i++) {
@@ -678,7 +487,7 @@ static void metronomefb_fillrect(struct fb_info *info,
{
struct metronomefb_par *par = info->par;
- cfb_fillrect(info, rect);
+ sys_fillrect(info, rect);
metronomefb_dpy_update(par);
}
@@ -687,7 +496,7 @@ static void metronomefb_copyarea(struct fb_info *info,
{
struct metronomefb_par *par = info->par;
- cfb_copyarea(info, area);
+ sys_copyarea(info, area);
metronomefb_dpy_update(par);
}
@@ -696,7 +505,7 @@ static void metronomefb_imageblit(struct fb_info *info,
{
struct metronomefb_par *par = info->par;
- cfb_imageblit(info, image);
+ sys_imageblit(info, image);
metronomefb_dpy_update(par);
}
@@ -733,7 +542,7 @@ static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
count = total_size - p;
}
- dst = (void __force *) (info->screen_base + p);
+ dst = (void __force *)(info->screen_base + p);
if (copy_from_user(dst, buf, count))
err = -EFAULT;
@@ -759,18 +568,10 @@ static struct fb_deferred_io metronomefb_defio = {
.deferred_io = metronomefb_dpy_deferred_io,
};
-static irqreturn_t metronome_handle_irq(int irq, void *dev_id)
-{
- struct fb_info *info = dev_id;
- struct metronomefb_par *par = info->par;
-
- wake_up_interruptible(&par->waitq);
- return IRQ_HANDLED;
-}
-
static int __devinit metronomefb_probe(struct platform_device *dev)
{
struct fb_info *info;
+ struct metronome_board *board;
int retval = -ENOMEM;
int videomemorysize;
unsigned char *videomemory;
@@ -779,17 +580,26 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
int cmd_size, wfm_size, img_size, padding_size, totalsize;
int i;
+ /* pick up board specific routines */
+ board = dev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+ /* take a reference on the board-specific driver module; if we can't, it is being unloaded */
+ if (!try_module_get(board->owner))
+ return -ENODEV;
+
/* we have two blocks of memory.
info->screen_base which is vm, and is the fb used by apps.
par->metromem which is physically contiguous memory and
contains the display controller commands, waveform,
processed image data and padding. this is the data pulled
- by the pxa255's LCD controller and pushed to Metronome */
+ by the device's LCD controller and pushed to Metronome */
videomemorysize = (DPY_W*DPY_H);
videomemory = vmalloc(videomemorysize);
if (!videomemory)
- return retval;
+ return -ENOMEM;
memset(videomemory, 0, videomemorysize);
@@ -797,7 +607,7 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
if (!info)
goto err_vfree;
- info->screen_base = (char __iomem *) videomemory;
+ info->screen_base = (char __force __iomem *)videomemory;
info->fbops = &metronomefb_ops;
info->var = metronomefb_var;
@@ -805,6 +615,7 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
info->fix.smem_len = videomemorysize;
par = info->par;
par->info = info;
+ par->board = board;
init_waitqueue_head(&par->waitq);
/* this table caches per page csum values. */
@@ -849,11 +660,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
par->metromem_desc_dma = par->metromem_dma + cmd_size + wfm_size
+ img_size + padding_size;
- /* load the waveform in. assume mode 3, temp 31 for now */
- /* a) request the waveform file from userspace
+ /* load the waveform in. assume mode 3, temp 31 for now
+ a) request the waveform file from userspace
b) process waveform and decode into metromem */
-
- retval = request_firmware(&fw_entry, "waveform.wbf", &dev->dev);
+ retval = request_firmware(&fw_entry, "metronome.wbf", &dev->dev);
if (retval < 0) {
printk(KERN_ERR "metronomefb: couldn't get waveform\n");
goto err_dma_free;
@@ -861,19 +671,14 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
retval = load_waveform((u8 *) fw_entry->data, fw_entry->size,
par->metromem_wfm, 3, 31, &par->frame_count);
+ release_firmware(fw_entry);
if (retval < 0) {
printk(KERN_ERR "metronomefb: couldn't process waveform\n");
- goto err_ld_wfm;
+ goto err_dma_free;
}
- release_firmware(fw_entry);
- retval = request_irq(IRQ_GPIO(RDY_GPIO_PIN), metronome_handle_irq,
- IRQF_DISABLED, "Metronome", info);
- if (retval) {
- dev_err(&dev->dev, "request_irq failed: %d\n", retval);
- goto err_ld_wfm;
- }
- set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+ if (board->setup_irq(info))
+ goto err_dma_free;
retval = metronome_init_regs(par);
if (retval < 0)
@@ -913,9 +718,7 @@ err_cmap:
err_fb_rel:
framebuffer_release(info);
err_free_irq:
- free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
-err_ld_wfm:
- release_firmware(fw_entry);
+ board->free_irq(info);
err_dma_free:
dma_free_writecombine(&dev->dev, par->metromemsize, par->metromem,
par->metromem_dma);
@@ -923,6 +726,7 @@ err_csum_table:
vfree(par->csum_table);
err_vfree:
vfree(videomemory);
+ module_put(board->owner);
return retval;
}
@@ -939,7 +743,8 @@ static int __devexit metronomefb_remove(struct platform_device *dev)
vfree(par->csum_table);
unregister_framebuffer(info);
vfree((void __force *)info->screen_base);
- free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+ par->board->free_irq(info);
+ module_put(par->board->owner);
framebuffer_release(info);
}
return 0;
@@ -949,48 +754,21 @@ static struct platform_driver metronomefb_driver = {
.probe = metronomefb_probe,
.remove = metronomefb_remove,
.driver = {
+ .owner = THIS_MODULE,
.name = "metronomefb",
},
};
-static struct platform_device *metronomefb_device;
-
static int __init metronomefb_init(void)
{
- int ret;
-
- if (!metronomefb_enable) {
- printk(KERN_ERR
- "Use metronomefb_enable to enable the device\n");
- return -ENXIO;
- }
-
- ret = platform_driver_register(&metronomefb_driver);
- if (!ret) {
- metronomefb_device = platform_device_alloc("metronomefb", 0);
- if (metronomefb_device)
- ret = platform_device_add(metronomefb_device);
- else
- ret = -ENOMEM;
-
- if (ret) {
- platform_device_put(metronomefb_device);
- platform_driver_unregister(&metronomefb_driver);
- }
- }
- return ret;
-
+ return platform_driver_register(&metronomefb_driver);
}
static void __exit metronomefb_exit(void)
{
- platform_device_unregister(metronomefb_device);
platform_driver_unregister(&metronomefb_driver);
}
-module_param(metronomefb_enable, uint, 0);
-MODULE_PARM_DESC(metronomefb_enable, "Enable communication with Metronome");
-
module_init(metronomefb_init);
module_exit(metronomefb_exit);
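The metronomefb rework above strips every PXA-specific register access out of the driver and routes it through a board descriptor handed in as platform_data. The authoritative definition lives in the new include/video/metronomefb.h, which is not part of this section; reconstructed purely from the callbacks the driver now invokes, the descriptor's shape is roughly:

    /* reconstructed sketch, not the real header */
    struct metronome_board {
        struct module *owner; /* pinned with try_module_get() in probe */
        void (*set_rst)(struct metronomefb_par *, int);
        void (*set_stdby)(struct metronomefb_par *, int);
        int (*met_wait_event)(struct metronomefb_par *);
        int (*met_wait_event_intr)(struct metronomefb_par *);
        int (*setup_irq)(struct fb_info *);
        void (*init_gpio_regs)(struct metronomefb_par *);
        void (*init_lcdc_regs)(struct metronomefb_par *);
        void (*post_dma_setup)(struct metronomefb_par *);
        void (*free_irq)(struct fb_info *);
    };

The am200epd.c board file named in the new header comment is where the deleted Gumstix GPIO and LCD controller code is expected to reappear behind these hooks.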
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 08d072552233..473562191586 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -22,7 +22,7 @@
((v).xres == (x) && (v).yres == (y))
#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk("modedb %s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) printk("modedb %s: " fmt, __func__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif
@@ -522,7 +522,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
- u32 best, diff;
+ u32 best, diff, tdiff;
for (i = namelen-1; i >= 0; i--) {
switch (name[i]) {
@@ -651,19 +651,27 @@ done:
return (refresh_specified) ? 2 : 1;
}
- diff = xres + yres;
+ diff = 2 * (xres + yres);
best = -1;
DPRINTK("Trying best-fit modes\n");
for (i = 0; i < dbsize; i++) {
- if (xres <= db[i].xres && yres <= db[i].yres) {
DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres);
if (!fb_try_mode(var, info, &db[i], bpp)) {
- if (diff > (db[i].xres - xres) + (db[i].yres - yres)) {
- diff = (db[i].xres - xres) + (db[i].yres - yres);
- best = i;
- }
+ tdiff = abs(db[i].xres - xres) +
+ abs(db[i].yres - yres);
+
+ /*
+ * Penalize modes with resolutions smaller
+ * than requested.
+ */
+ if (xres > db[i].xres || yres > db[i].yres)
+ tdiff += xres + yres;
+
+ if (diff > tdiff) {
+ diff = tdiff;
+ best = i;
+ }
}
- }
}
if (best != -1) {
fb_try_mode(var, info, &db[best], bpp);
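The best-fit rewrite above no longer restricts the search to modes at least as large as the request; every mode is scored, and smaller ones merely carry a penalty. The scoring rule, lifted into a standalone sketch:

    #include <stdlib.h>

    /* mirrors the tdiff computation above; lower scores are better */
    static unsigned int mode_score(int dbx, int dby, int xres, int yres)
    {
        unsigned int tdiff = abs(dbx - xres) + abs(dby - yres);

        /* penalize modes with resolutions smaller than requested */
        if (xres > dbx || yres > dby)
            tdiff += xres + yres;
        return tdiff;
    }

For an 800x600 request, a 1600x1200 candidate scores 800 + 600 = 1400 while a 640x480 candidate scores 160 + 120 + 1400 = 1680, so the oversized mode wins. Seeding diff with 2 * (xres + yres) = 2800 instead of the old xres + yres is what allows a score like 1400 to be accepted at all when no mode matches exactly.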
diff --git a/drivers/video/n411.c b/drivers/video/n411.c
new file mode 100644
index 000000000000..935830fea7b6
--- /dev/null
+++ b/drivers/video/n411.c
@@ -0,0 +1,202 @@
+/*
+ * linux/drivers/video/n411.c -- Platform device for N411 EPD kit
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the Hecuba display controller
+ * board, and tested with the EInk 800x600 display in 1 bit mode.
+ * The interface between Hecuba and the host is TTL based GPIO. The
+ * GPIO requirements are 8 writable data lines and 6 lines for control.
+ * Only 4 of the control lines are actually used here; all 6 are wired for future use.
+ * The driver requires the IO addresses for data and control GPIO at
+ * load time. It is also possible to use this display with a standard
+ * PC parallel port.
+ *
+ * General notes:
+ * - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+
+#include <video/hecubafb.h>
+
+static unsigned long dio_addr;
+static unsigned long cio_addr;
+static unsigned long c2io_addr;
+static unsigned long splashval;
+static unsigned int nosplash;
+static unsigned char ctl;
+
+static void n411_set_ctl(struct hecubafb_par *par, unsigned char bit, unsigned
+ char state)
+{
+ switch (bit) {
+ case HCB_CD_BIT:
+ if (state)
+ ctl &= ~(HCB_CD_BIT);
+ else
+ ctl |= HCB_CD_BIT;
+ break;
+ case HCB_DS_BIT:
+ if (state)
+ ctl &= ~(HCB_DS_BIT);
+ else
+ ctl |= HCB_DS_BIT;
+ break;
+ }
+ outb(ctl, cio_addr);
+}
+
+static unsigned char n411_get_ctl(struct hecubafb_par *par)
+{
+ return inb(c2io_addr);
+}
+
+static void n411_set_data(struct hecubafb_par *par, unsigned char value)
+{
+ outb(value, dio_addr);
+}
+
+static void n411_wait_for_ack(struct hecubafb_par *par, int clear)
+{
+ int timeout;
+ unsigned char tmp;
+
+ timeout = 500;
+ do {
+ tmp = n411_get_ctl(par);
+ if ((tmp & HCB_ACK_BIT) && (!clear))
+ return;
+ else if (!(tmp & HCB_ACK_BIT) && (clear))
+ return;
+ udelay(1);
+ } while (timeout--);
+ printk(KERN_ERR "timed out waiting for ack\n");
+}
+
+static int n411_init_control(struct hecubafb_par *par)
+{
+ unsigned char tmp;
+ /* for init, we want the following setup to be set:
+ WUP = lo
+ ACK = hi
+ DS = hi
+ RW = hi
+ CD = lo
+ */
+
+ /* write WUP to lo, DS to hi, RW to hi, CD to lo */
+ ctl = HCB_WUP_BIT | HCB_RW_BIT | HCB_CD_BIT;
+ n411_set_ctl(par, HCB_DS_BIT, 1);
+
+ /* check ACK is not lo */
+ tmp = n411_get_ctl(par);
+ if (tmp & HCB_ACK_BIT) {
+ printk(KERN_ERR "Fail because ACK is already low\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+
+static int n411_init_board(struct hecubafb_par *par)
+{
+ int retval;
+
+ retval = n411_init_control(par);
+ if (retval)
+ return retval;
+
+ par->send_command(par, APOLLO_INIT_DISPLAY);
+ par->send_data(par, 0x81);
+
+ /* have to wait while display resets */
+ udelay(1000);
+
+ /* if we were told to splash the screen, we just clear it */
+ if (!nosplash) {
+ par->send_command(par, APOLLO_ERASE_DISPLAY);
+ par->send_data(par, splashval);
+ }
+
+ return 0;
+}
+
+static struct hecuba_board n411_board = {
+ .owner = THIS_MODULE,
+ .init = n411_init_board,
+ .set_ctl = n411_set_ctl,
+ .set_data = n411_set_data,
+ .wait_for_ack = n411_wait_for_ack,
+};
+
+static struct platform_device *n411_device;
+static int __init n411_init(void)
+{
+ int ret;
+ if (!dio_addr || !cio_addr || !c2io_addr) {
+ printk(KERN_WARNING "no IO addresses supplied\n");
+ return -EINVAL;
+ }
+
+ /* request our platform independent driver */
+ request_module("hecubafb");
+
+ n411_device = platform_device_alloc("hecubafb", -1);
+ if (!n411_device)
+ return -ENOMEM;
+
+ platform_device_add_data(n411_device, &n411_board, sizeof(n411_board));
+
+ /* this _add binds hecubafb to n411. hecubafb refcounts n411 */
+ ret = platform_device_add(n411_device);
+
+ if (ret)
+ platform_device_put(n411_device);
+
+ return ret;
+
+}
+
+static void __exit n411_exit(void)
+{
+ platform_device_unregister(n411_device);
+}
+
+module_init(n411_init);
+module_exit(n411_exit);
+
+module_param(nosplash, uint, 0);
+MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
+module_param(dio_addr, ulong, 0);
+MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
+module_param(cio_addr, ulong, 0);
+MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
+module_param(c2io_addr, ulong, 0);
+MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
+module_param(splashval, ulong, 0);
+MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
+
+MODULE_DESCRIPTION("board driver for n411 hecuba/apollo epd kit");
+MODULE_AUTHOR("Jaya Kumar");
+MODULE_LICENSE("GPL");
+
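n411 itself never touches the framebuffer layer; it only registers a "hecubafb" platform device carrying n411_board as platform data. How the platform-independent side recovers and pins that descriptor is not shown in this diff, but mirroring what metronomefb does above, it presumably looks like this (a sketch with an assumed function name):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <video/hecubafb.h>

    static int hecubafb_get_board(struct platform_device *dev)
    {
        struct hecuba_board *board = dev->dev.platform_data;

        if (!board)
            return -EINVAL;
        /* hold the board module while its callbacks are in use; this is
         * the refcounting the "_add binds" comment above refers to */
        if (!try_module_get(board->owner))
            return -ENODEV;
        return 0;
    }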
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index d1a10549f543..ed20a9871b33 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -129,7 +129,7 @@ typedef struct {
int nvclk_khz;
char mem_page_miss;
char mem_latency;
- int memory_type;
+ u32 memory_type;
int memory_width;
char enable_video;
char gr_during_vid;
@@ -719,7 +719,7 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
memctrl >>= 16;
if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) {
- int dimm[3];
+ u32 dimm[3];
dev = pci_get_bus_and_slot(0, 2);
pci_read_config_dword(dev, 0x40, &dimm[0]);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 82579d3a9970..d9627b57eb4d 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -265,12 +265,12 @@ static void nv10GetConfig(struct nvidia_par *par)
dev = pci_get_bus_and_slot(0, 1);
if ((par->Chipset & 0xffff) == 0x01a0) {
- int amt = 0;
+ u32 amt;
pci_read_config_dword(dev, 0x7c, &amt);
par->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024;
} else if ((par->Chipset & 0xffff) == 0x01f0) {
- int amt = 0;
+ u32 amt;
pci_read_config_dword(dev, 0x84, &amt);
par->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024;
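The int-to-u32 conversions here, in nv_hw.c above, and in the riva files below all have the same root cause: the PCI config-space accessors store through a u32 pointer, so reading into an int only works by accident of ABI and upsets type checking. The relevant prototype from this era's <linux/pci.h>:

    int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val);

    u32 amt; /* was: int amt = 0; */
    pci_read_config_dword(dev, 0x7c, &amt);

The dropped "= 0" initializers were dead stores, since the accessor writes the variable before anyone reads it.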
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 596652d2831f..9dbb5a5a267b 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -43,14 +43,14 @@
#define NVTRACE if (0) printk
#endif
-#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __FUNCTION__)
+#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __func__)
+#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __func__)
#ifdef CONFIG_FB_NVIDIA_DEBUG
#define assert(expr) \
if (!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
- #expr,__FILE__,__FUNCTION__,__LINE__); \
+ #expr,__FILE__,__func__,__LINE__); \
BUG(); \
}
#else
@@ -1559,7 +1559,6 @@ static int __devinit nvidiafb_init(void)
module_init(nvidiafb_init);
-#ifdef MODULE
static void __exit nvidiafb_exit(void)
{
pci_unregister_driver(&nvidiafb_driver);
@@ -1615,5 +1614,3 @@ MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
MODULE_AUTHOR("Antonino Daplas");
MODULE_DESCRIPTION("Framebuffer driver for nVidia graphics chipset");
MODULE_LICENSE("GPL");
-#endif /* MODULE */
-
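Dropping the #ifdef MODULE guard around nvidiafb_exit() (and rivafb_exit() below) is safe because module_exit() only wires the function up for modular builds; when the driver is built in, __exit code is discarded at link time anyway, so the unconditional definition costs nothing.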
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 452433d46973..d7b3dcc0dc43 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -248,7 +248,7 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
static void __init offb_init_fb(const char *name, const char *full_name,
int width, int height, int depth,
int pitch, unsigned long address,
- struct device_node *dp)
+ int foreign_endian, struct device_node *dp)
{
unsigned long res_size = pitch * height * (depth + 7) / 8;
struct offb_par *par = &default_par;
@@ -397,7 +397,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
info->screen_base = ioremap(address, fix->smem_len);
info->par = par;
info->pseudo_palette = (void *) (info + 1);
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | foreign_endian;
fb_alloc_cmap(&info->cmap, 256, 0);
@@ -424,6 +424,15 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
u64 rstart, address = OF_BAD_ADDR;
const u32 *pp, *addrp, *up;
u64 asize;
+ int foreign_endian = 0;
+
+#ifdef __BIG_ENDIAN
+ if (of_get_property(dp, "little-endian", NULL))
+ foreign_endian = FBINFO_FOREIGN_ENDIAN;
+#else
+ if (of_get_property(dp, "big-endian", NULL))
+ foreign_endian = FBINFO_FOREIGN_ENDIAN;
+#endif
pp = of_get_property(dp, "linux,bootx-depth", &len);
if (pp == NULL)
@@ -509,7 +518,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
offb_init_fb(no_real_node ? "bootx" : dp->name,
no_real_node ? "display" : dp->full_name,
width, height, depth, pitch, address,
- no_real_node ? NULL : dp);
+ foreign_endian, no_real_node ? NULL : dp);
}
}
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 30181b593829..3f1ca2adda3d 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -56,7 +56,7 @@
#undef PM2FB_MASTER_DEBUG
#ifdef PM2FB_MASTER_DEBUG
#define DPRINTK(a, b...) \
- printk(KERN_DEBUG "pm2fb: %s: " a, __FUNCTION__ , ## b)
+ printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
#else
#define DPRINTK(a, b...)
#endif
@@ -67,7 +67,7 @@
* Driver data
*/
static int hwcursor = 1;
-static char *mode __devinitdata;
+static char *mode_option __devinitdata;
/*
* The XFree GLINT driver will (I think to implement hardware cursor
@@ -1680,17 +1680,19 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
info->pixmap.scan_align = 1;
}
- if (!mode)
- mode = "640x480@60";
+ if (!mode_option)
+ mode_option = "640x480@60";
- err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
+ err = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
if (!err || err == 4)
info->var = pm2fb_var;
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0)
goto err_exit_both;
- if (register_framebuffer(info) < 0)
+ retval = register_framebuffer(info);
+ if (retval < 0)
goto err_exit_all;
printk(KERN_INFO "fb%d: %s frame buffer device, memory = %dK.\n",
@@ -1797,7 +1799,7 @@ static int __init pm2fb_setup(char *options)
else if (!strncmp(this_opt, "noaccel", 7))
noaccel = 1;
else
- mode = this_opt;
+ mode_option = this_opt;
}
return 0;
}
@@ -1833,8 +1835,10 @@ static void __exit pm2fb_exit(void)
#ifdef MODULE
module_exit(pm2fb_exit);
-module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Preferred video mode e.g. '648x480-8@60'");
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Initial video mode e.g. '640x480-8@60' (deprecated)");
module_param(lowhsync, bool, 0);
MODULE_PARM_DESC(lowhsync, "Force horizontal sync low regardless of mode");
module_param(lowvsync, bool, 0);
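The mode → mode_option rename (repeated below for s3fb, tridentfb, and vt8623fb) standardizes the option name across fbdev while keeping old command lines working: module_param_named() binds a second parameter name to the same variable. In miniature:

    static char *mode_option;

    module_param(mode_option, charp, 0); /* canonical name */
    /* legacy alias: mode=... still lands in mode_option; its
     * description now marks it deprecated */
    module_param_named(mode, mode_option, charp, 0);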
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 5dba8cdd0517..68089d1456c2 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -45,7 +45,7 @@
#undef PM3FB_MASTER_DEBUG
#ifdef PM3FB_MASTER_DEBUG
#define DPRINTK(a, b...) \
- printk(KERN_DEBUG "pm3fb: %s: " a, __FUNCTION__ , ## b)
+ printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
#else
#define DPRINTK(a, b...)
#endif
@@ -1571,6 +1571,8 @@ module_exit(pm3fb_exit);
#endif
module_init(pm3fb_init);
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "Disable acceleration");
module_param(hwcursor, int, 0644);
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 5c47968e7f21..d94c57ffbdb1 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -56,10 +56,6 @@
#include "rivafb.h"
#include "nvreg.h"
-#ifndef CONFIG_PCI /* sanity check */
-#error This driver requires PCI support.
-#endif
-
/* version number of this driver */
#define RIVAFB_VERSION "0.9.5b"
@@ -74,14 +70,14 @@
#define NVTRACE if(0) printk
#endif
-#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __FUNCTION__)
+#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __func__)
+#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __func__)
#ifdef CONFIG_FB_RIVA_DEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
- #expr,__FILE__,__FUNCTION__,__LINE__); \
+ #expr,__FILE__,__func__,__LINE__); \
BUG(); \
}
#else
@@ -2213,14 +2209,12 @@ static int __devinit rivafb_init(void)
module_init(rivafb_init);
-#ifdef MODULE
static void __exit rivafb_exit(void)
{
pci_unregister_driver(&rivafb_driver);
}
module_exit(rivafb_exit);
-#endif /* MODULE */
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
diff --git a/drivers/video/riva/nv_driver.c b/drivers/video/riva/nv_driver.c
index a11026812d1b..f3694cf17e58 100644
--- a/drivers/video/riva/nv_driver.c
+++ b/drivers/video/riva/nv_driver.c
@@ -41,11 +41,6 @@
#include "rivafb.h"
#include "nvreg.h"
-
-#ifndef CONFIG_PCI /* sanity check */
-#error This driver requires PCI support.
-#endif
-
#define PFX "rivafb: "
static inline unsigned char MISCin(struct riva_par *par)
@@ -163,7 +158,7 @@ unsigned long riva_get_memlen(struct riva_par *par)
unsigned long memlen = 0;
unsigned int chipset = par->Chipset;
struct pci_dev* dev;
- int amt;
+ u32 amt;
switch (chip->Architecture) {
case NV_ARCH_03:
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 13307703a9f0..78fdbf5178d7 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -231,7 +231,7 @@ typedef struct {
int nvclk_khz;
char mem_page_miss;
char mem_latency;
- int memory_type;
+ u32 memory_type;
int memory_width;
char enable_video;
char gr_during_vid;
@@ -2107,7 +2107,7 @@ static void nv10GetConfig
)
{
struct pci_dev* dev;
- int amt;
+ u32 amt;
#ifdef __BIG_ENDIAN
/* turn on big endian register access */
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 71fa6edb5c47..13b38cbbe4cf 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -430,9 +430,9 @@ static void s3c2410fb_activate_var(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
int clkdiv = s3c2410fb_calc_pixclk(fbi, var->pixclock) / 2;
- dprintk("%s: var->xres = %d\n", __FUNCTION__, var->xres);
- dprintk("%s: var->yres = %d\n", __FUNCTION__, var->yres);
- dprintk("%s: var->bpp = %d\n", __FUNCTION__, var->bits_per_pixel);
+ dprintk("%s: var->xres = %d\n", __func__, var->xres);
+ dprintk("%s: var->yres = %d\n", __func__, var->yres);
+ dprintk("%s: var->bpp = %d\n", __func__, var->bits_per_pixel);
if (type == S3C2410_LCDCON1_TFT) {
s3c2410fb_calculate_tft_lcd_regs(info, &fbi->regs);
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 7d53bc23b9c7..2972f112dbed 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -132,10 +132,10 @@ static const struct svga_timing_regs s3_timing_regs = {
/* Module parameters */
-static char *mode = "640x480-8@60";
+static char *mode_option __devinitdata = "640x480-8@60";
#ifdef CONFIG_MTRR
-static int mtrr = 1;
+static int mtrr __devinitdata = 1;
#endif
static int fasttext = 1;
@@ -145,8 +145,10 @@ MODULE_AUTHOR("(c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for S3 Trio/Virge");
-module_param(mode, charp, 0444);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0444);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0444);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
@@ -886,7 +888,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
}
/* Allocate and fill driver data structure */
- info = framebuffer_alloc(sizeof(struct s3fb_info), NULL);
+ info = framebuffer_alloc(sizeof(struct s3fb_info), &(dev->dev));
if (!info) {
dev_err(&(dev->dev), "cannot allocate memory\n");
return -ENOMEM;
@@ -901,13 +903,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
/* Prepare PCI device */
rc = pci_enable_device(dev);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot enable PCI device\n");
+ dev_err(info->dev, "cannot enable PCI device\n");
goto err_enable_device;
}
rc = pci_request_regions(dev, "s3fb");
if (rc < 0) {
- dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+ dev_err(info->dev, "cannot reserve framebuffer region\n");
goto err_request_regions;
}
@@ -919,7 +921,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
info->screen_base = pci_iomap(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
- dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+ dev_err(info->dev, "iomap for framebuffer failed\n");
goto err_iomap;
}
@@ -960,22 +962,22 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
info->pseudo_palette = (void*) (par->pseudo_palette);
/* Prepare startup mode */
- rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+ rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
- dev_err(&(dev->dev), "mode %s not found\n", mode);
+ dev_err(info->dev, "mode %s not found\n", mode_option);
goto err_find_mode;
}
rc = fb_alloc_cmap(&info->cmap, 256, 0);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot allocate colormap\n");
+ dev_err(info->dev, "cannot allocate colormap\n");
goto err_alloc_cmap;
}
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot register framebuffer\n");
+ dev_err(info->dev, "cannot register framebuffer\n");
goto err_reg_fb;
}
@@ -1051,7 +1053,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
struct fb_info *info = pci_get_drvdata(dev);
struct s3fb_info *par = info->par;
- dev_info(&(dev->dev), "suspend\n");
+ dev_info(info->dev, "suspend\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -1083,7 +1085,7 @@ static int s3_pci_resume(struct pci_dev* dev)
struct s3fb_info *par = info->par;
int err;
- dev_info(&(dev->dev), "resume\n");
+ dev_info(info->dev, "resume\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -1100,7 +1102,7 @@ static int s3_pci_resume(struct pci_dev* dev)
if (err) {
mutex_unlock(&(par->open_lock));
release_console_sem();
- dev_err(&(dev->dev), "error %d enabling device for resume\n", err);
+ dev_err(info->dev, "error %d enabling device for resume\n", err);
return err;
}
pci_set_master(dev);
@@ -1168,7 +1170,7 @@ static int __init s3fb_setup(char *options)
else if (!strncmp(opt, "fasttext:", 9))
fasttext = simple_strtoul(opt + 9, NULL, 0);
else
- mode = opt;
+ mode_option = opt;
}
return 0;
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index 48066ef3af05..f465b27ed860 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -132,7 +132,7 @@ struct sa1100fb_info {
* Debug macros
*/
#if DEBUG
-# define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
+# define DPRINTK(fmt, args...) printk("%s: " fmt, __func__ , ## args)
#else
# define DPRINTK(fmt, args...)
#endif
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 35c1ce62b216..783d4adffb93 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -140,7 +140,7 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
chan->adapter.id = I2C_HW_B_SAVAGE;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = &chan->par->pcidev->dev;
- chan->algo.udelay = 40;
+ chan->algo.udelay = 10;
chan->algo.timeout = 20;
chan->algo.data = chan;
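For context on the savagefb-i2c tweak: in i2c-algo-bit, udelay is the half period of the clock in microseconds, so cutting it from 40 to 10 raises the DDC bus from 10^6 / (2 * 40) = 12.5 kHz to 10^6 / (2 * 10) = 50 kHz, still comfortably under the 100 kHz standard-mode I2C ceiling (assuming the field keeps its documented half-cycle meaning).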
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index 9b05da6268f7..a14e82211037 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -55,7 +55,7 @@
#undef SISFBDEBUG
#ifdef SISFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#define TWDEBUG(x) printk(KERN_INFO x "\n");
#else
#define DPRINTK(fmt, args...)
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 97784f9c184d..5b11a00f49bc 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -1006,7 +1006,7 @@ static int sst_set_pll_att_ti(struct fb_info *info,
break;
default:
dprintk("%s: wrong clock code '%d'\n",
- __FUNCTION__, clock);
+ __func__, clock);
return 0;
}
udelay(300);
@@ -1048,7 +1048,7 @@ static int sst_set_pll_ics(struct fb_info *info,
break;
default:
dprintk("%s: wrong clock code '%d'\n",
- __FUNCTION__, clock);
+ __func__, clock);
return 0;
}
udelay(300);
@@ -1079,7 +1079,7 @@ static void sst_set_vidmod_att_ti(struct fb_info *info, const int bpp)
sst_dac_write(DACREG_RMR, (cr0 & 0x0f) | DACREG_CR0_16BPP);
break;
default:
- dprintk("%s: bad depth '%u'\n", __FUNCTION__, bpp);
+ dprintk("%s: bad depth '%u'\n", __func__, bpp);
break;
}
}
@@ -1093,7 +1093,7 @@ static void sst_set_vidmod_ics(struct fb_info *info, const int bpp)
sst_dac_write(DACREG_ICS_CMD, DACREG_ICS_CMD_16BPP);
break;
default:
- dprintk("%s: bad depth '%u'\n", __FUNCTION__, bpp);
+ dprintk("%s: bad depth '%u'\n", __func__, bpp);
break;
}
}
@@ -1133,7 +1133,7 @@ static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *
}
if (!ret)
return 0;
- f_dprintk("%s found %s\n", __FUNCTION__, dacs[i].name);
+ f_dprintk("%s found %s\n", __func__, dacs[i].name);
par->dac_sw = dacs[i];
return 1;
}
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index f98be301140c..598d35eff935 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -164,11 +164,11 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
# define DEBUG_ON() debug_on=1
# define WRITE_BYTE(value,fb,reg) do { if (debug_on) \
printk(KERN_DEBUG "%30s: WRITE_BYTE(0x%06x) = 0x%02x (old=0x%02x)\n", \
- __FUNCTION__, reg, value, READ_BYTE(fb,reg)); \
+ __func__, reg, value, READ_BYTE(fb,reg)); \
gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
# define WRITE_WORD(value,fb,reg) do { if (debug_on) \
printk(KERN_DEBUG "%30s: WRITE_WORD(0x%06x) = 0x%08x (old=0x%08x)\n", \
- __FUNCTION__, reg, value, READ_WORD(fb,reg)); \
+ __func__, reg, value, READ_WORD(fb,reg)); \
gsc_writel((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
#endif /* DEBUG_STIFB_REGS */
diff --git a/drivers/video/syscopyarea.c b/drivers/video/syscopyarea.c
index 37af10ab8f52..a352d5f46bbf 100644
--- a/drivers/video/syscopyarea.c
+++ b/drivers/video/syscopyarea.c
@@ -26,15 +26,15 @@
*/
static void
-bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
- int src_idx, int bits, unsigned n)
+bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
+ const unsigned long *src, int src_idx, int bits, unsigned n)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
int left, right;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (!shift) {
/* Same alignment for source and dest */
@@ -167,8 +167,8 @@ bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
*/
static void
-bitcpy_rev(unsigned long *dst, int dst_idx, const unsigned long *src,
- int src_idx, int bits, unsigned n)
+bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+ const unsigned long *src, int src_idx, int bits, unsigned n)
{
unsigned long first, last;
int shift;
@@ -186,8 +186,8 @@ bitcpy_rev(unsigned long *dst, int dst_idx, const unsigned long *src,
shift = dst_idx-src_idx;
- first = FB_SHIFT_LOW(~0UL, bits - 1 - dst_idx);
- last = ~(FB_SHIFT_LOW(~0UL, bits - 1 - ((dst_idx-n) % bits)));
+ first = FB_SHIFT_LOW(p, ~0UL, bits - 1 - dst_idx);
+ last = ~(FB_SHIFT_LOW(p, ~0UL, bits - 1 - ((dst_idx-n) % bits)));
if (!shift) {
/* Same alignment for source and dest */
@@ -353,7 +353,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
- bitcpy_rev(dst, dst_idx, src, src_idx, bits,
+ bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel);
}
} else {
@@ -362,7 +362,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
- bitcpy(dst, dst_idx, src, src_idx, bits,
+ bitcpy(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel);
dst_idx += bits_per_line;
src_idx += bits_per_line;
diff --git a/drivers/video/sysfillrect.c b/drivers/video/sysfillrect.c
index a261e9e6a675..f94d6b6e29ee 100644
--- a/drivers/video/sysfillrect.c
+++ b/drivers/video/sysfillrect.c
@@ -22,16 +22,16 @@
*/
static void
-bitfill_aligned(unsigned long *dst, int dst_idx, unsigned long pat,
- unsigned n, int bits)
+bitfill_aligned(struct fb_info *p, unsigned long *dst, int dst_idx,
+ unsigned long pat, unsigned n, int bits)
{
unsigned long first, last;
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
@@ -78,16 +78,16 @@ bitfill_aligned(unsigned long *dst, int dst_idx, unsigned long pat,
*/
static void
-bitfill_unaligned(unsigned long *dst, int dst_idx, unsigned long pat,
- int left, int right, unsigned n, int bits)
+bitfill_unaligned(struct fb_info *p, unsigned long *dst, int dst_idx,
+ unsigned long pat, int left, int right, unsigned n, int bits)
{
unsigned long first, last;
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
@@ -132,8 +132,8 @@ bitfill_unaligned(unsigned long *dst, int dst_idx, unsigned long pat,
* Aligned pattern invert using 32/64-bit memory accesses
*/
static void
-bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
- unsigned n, int bits)
+bitfill_aligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+ unsigned long pat, unsigned n, int bits)
{
unsigned long val = pat;
unsigned long first, last;
@@ -141,8 +141,8 @@ bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
@@ -188,16 +188,17 @@ bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
*/
static void
-bitfill_unaligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
- int left, int right, unsigned n, int bits)
+bitfill_unaligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+ unsigned long pat, int left, int right, unsigned n,
+ int bits)
{
unsigned long first, last;
if (!n)
return;
- first = FB_SHIFT_HIGH(~0UL, dst_idx);
- last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+ first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+ last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (dst_idx+n <= bits) {
/* Single word */
@@ -267,9 +268,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
if (!left) {
- void (*fill_op32)(unsigned long *dst, int dst_idx,
- unsigned long pat, unsigned n, int bits) =
- NULL;
+ void (*fill_op32)(struct fb_info *p, unsigned long *dst,
+ int dst_idx, unsigned long pat, unsigned n,
+ int bits) = NULL;
switch (rect->rop) {
case ROP_XOR:
@@ -287,16 +288,16 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bits - 1);
- fill_op32(dst, dst_idx, pat, width*bpp, bits);
+ fill_op32(p, dst, dst_idx, pat, width*bpp, bits);
dst_idx += p->fix.line_length*8;
}
} else {
int right;
int r;
int rot = (left-dst_idx) % bpp;
- void (*fill_op)(unsigned long *dst, int dst_idx,
- unsigned long pat, int left, int right,
- unsigned n, int bits) = NULL;
+ void (*fill_op)(struct fb_info *p, unsigned long *dst,
+ int dst_idx, unsigned long pat, int left,
+ int right, unsigned n, int bits) = NULL;
/* rotate pattern to correct start position */
pat = pat << rot | pat >> (bpp-rot);
@@ -318,7 +319,7 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bits - 1);
- fill_op(dst, dst_idx, pat, left, right,
+ fill_op(p, dst, dst_idx, pat, left, right,
width*bpp, bits);
r = (p->fix.line_length*8) % bpp;
pat = pat << (bpp-r) | pat >> r;
diff --git a/drivers/video/sysimgblt.c b/drivers/video/sysimgblt.c
index bd7e7e9d155f..186c6f607be2 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/sysimgblt.c
@@ -18,35 +18,31 @@
#define DEBUG
#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__FUNCTION__,## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
#else
#define DPRINTK(fmt, args...)
#endif
-static const u32 cfb_tab8[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab8_be[] = {
0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab8_le[] = {
0x00000000,0xff000000,0x00ff0000,0xffff0000,
0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
};
-static const u32 cfb_tab16[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab16_be[] = {
0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab16_le[] = {
0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
};
static const u32 cfb_tab32[] = {
@@ -72,7 +68,7 @@ static void color_imageblit(const struct fb_image *image, struct fb_info *p,
val = 0;
if (start_index) {
- u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0,
+ u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
start_index));
val = *dst & start_mask;
shift = start_index;
@@ -83,20 +79,20 @@ static void color_imageblit(const struct fb_image *image, struct fb_info *p,
color = palette[*src];
else
color = *src;
- color <<= FB_LEFT_POS(bpp);
- val |= FB_SHIFT_HIGH(color, shift);
+ color <<= FB_LEFT_POS(p, bpp);
+ val |= FB_SHIFT_HIGH(p, color, shift);
if (shift >= null_bits) {
*dst++ = val;
val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(color, 32 - shift);
+ FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
src++;
}
if (shift) {
- u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift);
+ u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
*dst &= end_mask;
*dst |= val;
@@ -125,8 +121,8 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
u32 i, j, l;
dst2 = dst1;
- fgcolor <<= FB_LEFT_POS(bpp);
- bgcolor <<= FB_LEFT_POS(bpp);
+ fgcolor <<= FB_LEFT_POS(p, bpp);
+ bgcolor <<= FB_LEFT_POS(p, bpp);
for (i = image->height; i--; ) {
shift = val = 0;
@@ -137,7 +133,8 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
/* write leading bits */
if (start_index) {
- u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0,start_index));
+ u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
+ start_index));
val = *dst & start_mask;
shift = start_index;
}
@@ -145,13 +142,13 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
while (j--) {
l--;
color = (*s & (1 << l)) ? fgcolor : bgcolor;
- val |= FB_SHIFT_HIGH(color, shift);
+ val |= FB_SHIFT_HIGH(p, color, shift);
/* Did the bitshift spill bits to the next long? */
if (shift >= null_bits) {
*dst++ = val;
val = (shift == null_bits) ? 0 :
- FB_SHIFT_LOW(color,32 - shift);
+ FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
@@ -160,7 +157,7 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
/* write trailing bits */
if (shift) {
- u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift);
+ u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
*dst &= end_mask;
*dst |= val;
@@ -199,10 +196,10 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
switch (bpp) {
case 8:
- tab = cfb_tab8;
+ tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
break;
case 16:
- tab = cfb_tab16;
+ tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
break;
case 32:
default:
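Every FB_SHIFT_HIGH/FB_SHIFT_LOW/FB_LEFT_POS call in syscopyarea.c, sysfillrect.c, and sysimgblt.c gains a struct fb_info * because the shift direction now depends on whether the framebuffer's byte order matches the host's. The macro shape this implies (a sketch; the real definitions belong to <linux/fb.h> in this series):

    /* fb_be_math(p) is true when pixel math must be done big-endian,
     * e.g. for a big-endian framebuffer driven by a little-endian host */
    #define FB_LEFT_POS(p, bpp)         (fb_be_math(p) ? (32 - (bpp)) : 0)
    #define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) \
                                                       : (val) << (bits))
    #define FB_SHIFT_LOW(p, val, bits)  (fb_be_math(p) ? (val) << (bits) \
                                                       : (val) >> (bits))

The same predicate selects between the new cfb_tab8_be/cfb_tab8_le expansion tables at the end of sysimgblt.c, replacing the old compile-time #ifdef __BIG_ENDIAN choice with a per-framebuffer runtime one.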
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 71e179ea5f95..ea9f19d25597 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -70,7 +70,7 @@
#include <video/tdfx.h>
-#define DPRINTK(a, b...) pr_debug("fb: %s: " a, __FUNCTION__ , ## b)
+#define DPRINTK(a, b...) pr_debug("fb: %s: " a, __func__ , ## b)
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 0a4e07d43d2d..bd54cd0de39a 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -58,7 +58,7 @@ static int displaytype;
/* defaults which are normally overridden by user values */
/* video mode */
-static char *mode = "640x480";
+static char *mode_option __devinitdata = "640x480";
static int bpp = 8;
static int noaccel;
@@ -73,7 +73,10 @@ static int memsize;
static int memdiff;
static int nativex;
-module_param(mode, charp, 0);
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Initial video mode e.g. '640x480-8@60' (deprecated)");
module_param(bpp, int, 0);
module_param(center, int, 0);
module_param(stretch, int, 0);
@@ -1297,7 +1300,8 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
#endif
fb_info.pseudo_palette = pseudo_pal;
- if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) {
+ if (!fb_find_mode(&default_var, &fb_info,
+ mode_option, NULL, 0, NULL, bpp)) {
err = -EINVAL;
goto out_unmap2;
}
@@ -1385,7 +1389,7 @@ static struct pci_driver tridentfb_pci_driver = {
* video=trident:800x600,bpp=16,noaccel
*/
#ifndef MODULE
-static int tridentfb_setup(char *options)
+static int __init tridentfb_setup(char *options)
{
char *opt;
if (!options || !*options)
@@ -1412,7 +1416,7 @@ static int tridentfb_setup(char *options)
else if (!strncmp(opt, "nativex=", 8))
nativex = simple_strtoul(opt + 8, NULL, 0);
else
- mode = opt;
+ mode_option = opt;
}
return 0;
}
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 93361656316c..cdbb56edb6cb 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -181,7 +181,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
/* If all slots are taken -- bail out. */
if (uvfb_tasks[seq]) {
mutex_unlock(&uvfb_lock);
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
/* Save a pointer to the kernel part of the task struct. */
@@ -205,7 +206,6 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
err = cn_netlink_send(m, 0, gfp_any());
}
}
- kfree(m);
if (!err && !(task->t.flags & TF_EXIT))
err = !wait_for_completion_timeout(task->done,
@@ -218,7 +218,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
seq++;
if (seq >= UVESAFB_TASKS_MAX)
seq = 0;
-
+out:
+ kfree(m);
return err;
}
@@ -885,7 +886,7 @@ static int __devinit uvesafb_vbe_init_mode(struct fb_info *info)
}
/* fb_find_mode() failed */
- if (i == 0 || i >= 3) {
+ if (i == 0) {
info->var.xres = 640;
info->var.yres = 480;
mode = (struct fb_videomode *)
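The uvesafb_exec() hunk is a leak fix: the early -EBUSY return bypassed the kfree(m) that used to sit mid-function, so the free moves behind a single exit label that every path reaches. The pattern in a self-contained miniature (generic names, not uvesafb's):

    #include <stdlib.h>

    static int do_task(int slot_busy)
    {
        char *m = malloc(64);
        int err = 0;

        if (!m)
            return -1;

        if (slot_busy) {
            err = -2;
            goto out; /* previously an early return that leaked m */
        }
        /* ... build and send the message ... */
    out:
        free(m); /* exactly one free on every path that allocated */
        return err;
    }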
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 2aa71eb67c2b..c18f1884b550 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -112,8 +112,9 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
/*
* It seems like __get_free_pages only ups the usage count
- * of the first page. This doesn't work with nopage mapping, so
- * up the usage count once more.
+ * of the first page. This doesn't work with fault mapping, so
+ * up the usage count once more (XXX: should use split_page or
+ * compound page).
*/
memset((void *)va->logical, 0x00, va->size);
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 4c3a63308df1..536ab11623f0 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -100,7 +100,7 @@ static struct svga_timing_regs vt8623_timing_regs = {
/* Module parameters */
-static char *mode = "640x480-8@60";
+static char *mode_option = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;
@@ -110,8 +110,10 @@ MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
-module_param(mode, charp, 0644);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0644);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Default video mode e.g. '648x480-8@60' (deprecated)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
@@ -434,6 +436,10 @@ static int vt8623fb_set_par(struct fb_info *info)
svga_wcrt_multi(vt8623_offset_regs, offset_value);
svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
+ /* Clear H/V Skew */
+ svga_wcrt_mask(0x03, 0x00, 0x60);
+ svga_wcrt_mask(0x05, 0x00, 0x60);
+
if (info->var.vmode & FB_VMODE_DOUBLE)
svga_wcrt_mask(0x09, 0x80, 0x80);
else
@@ -655,7 +661,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
}
/* Allocate and fill driver data structure */
- info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
+ info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
if (! info) {
dev_err(&(dev->dev), "cannot allocate memory\n");
return -ENOMEM;
@@ -671,13 +677,13 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
rc = pci_enable_device(dev);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot enable PCI device\n");
+ dev_err(info->dev, "cannot enable PCI device\n");
goto err_enable_device;
}
rc = pci_request_regions(dev, "vt8623fb");
if (rc < 0) {
- dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+ dev_err(info->dev, "cannot reserve framebuffer region\n");
goto err_request_regions;
}
@@ -690,14 +696,14 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
info->screen_base = pci_iomap(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
- dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+ dev_err(info->dev, "iomap for framebuffer failed\n");
goto err_iomap_1;
}
par->mmio_base = pci_iomap(dev, 1, 0);
if (! par->mmio_base) {
rc = -ENOMEM;
- dev_err(&(dev->dev), "iomap for MMIO failed\n");
+ dev_err(info->dev, "iomap for MMIO failed\n");
goto err_iomap_2;
}
@@ -708,7 +714,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
info->screen_size = memsize1 << 20;
else {
- dev_err(&(dev->dev), "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
+ dev_err(info->dev, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
info->screen_size = 16 << 20;
}
@@ -722,22 +728,22 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
/* Prepare startup mode */
- rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+ rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
- dev_err(&(dev->dev), "mode %s not found\n", mode);
+ dev_err(info->dev, "mode %s not found\n", mode_option);
goto err_find_mode;
}
rc = fb_alloc_cmap(&info->cmap, 256, 0);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot allocate colormap\n");
+ dev_err(info->dev, "cannot allocate colormap\n");
goto err_alloc_cmap;
}
rc = register_framebuffer(info);
if (rc < 0) {
- dev_err(&(dev->dev), "cannot register framebugger\n");
+ dev_err(info->dev, "cannot register framebugger\n");
goto err_reg_fb;
}
@@ -811,7 +817,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
struct fb_info *info = pci_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
- dev_info(&(dev->dev), "suspend\n");
+ dev_info(info->dev, "suspend\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -842,7 +848,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
struct fb_info *info = pci_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
- dev_info(&(dev->dev), "resume\n");
+ dev_info(info->dev, "resume\n");
acquire_console_sem();
mutex_lock(&(par->open_lock));
@@ -913,7 +919,7 @@ static int __init vt8623fb_init(void)
return -ENODEV;
if (option && *option)
- mode = option;
+ mode_option = option;
#endif
pr_debug("vt8623fb: initializing\n");
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 003c49a490eb..30469bf906e5 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -765,8 +765,10 @@ int __init w100fb_probe(struct platform_device *pdev)
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
return 0;
out:
- fb_dealloc_cmap(&info->cmap);
- kfree(info->pseudo_palette);
+ if (info) {
+ fb_dealloc_cmap(&info->cmap);
+ kfree(info->pseudo_palette);
+ }
if (remapped_fbuf != NULL)
iounmap(remapped_fbuf);
if (remapped_regs != NULL)
diff --git a/fs/aio.c b/fs/aio.c
index 228368610dfa..ae94e1dea266 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1166,7 +1166,10 @@ retry:
break;
if (min_nr <= i)
break;
- ret = 0;
+ if (unlikely(ctx->dead)) {
+ ret = -EINVAL;
+ break;
+ }
if (to.timed_out) /* Only check after read evt */
break;
/* Try to only show up in io wait if there are ops
@@ -1231,6 +1234,13 @@ static void io_destroy(struct kioctx *ioctx)
aio_cancel_all(ioctx);
wait_for_all_aios(ioctx);
+
+ /*
+ * Wake up any waiters. The setting of ctx->dead must be seen
+ * by other CPUs at this point. Right now, we rely on the
+ * locking done by the above calls to ensure this consistency.
+ */
+ wake_up(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
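The two fs/aio.c hunks pair up: io_destroy() now wakes the wait queue after ctx->dead is set, and read_events() re-checks the flag after each wakeup, so a reader can no longer sleep forever on a context that is being torn down. In condensed form (names are the ones from the diff):

	/* destroyer side (io_destroy), after cancel/wait have completed: */
		wake_up(&ioctx->wait);		/* ctx->dead is already visible */

	/* reader side (read_events loop), on every wakeup: */
		if (unlikely(ctx->dead)) {
			ret = -EINVAL;		/* context is going away; bail out */
			break;
		}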
diff --git a/fs/buffer.c b/fs/buffer.c
index 39ff14403d13..3db4a26adc44 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,16 +360,19 @@ void invalidate_bdev(struct block_device *bdev)
*/
static void free_more_memory(void)
{
- struct zone **zones;
- pg_data_t *pgdat;
+ struct zone *zone;
+ int nid;
wakeup_pdflush(1024);
yield();
- for_each_online_pgdat(pgdat) {
- zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
- if (*zones)
- try_to_free_pages(zones, 0, GFP_NOFS);
+ for_each_online_node(nid) {
+ (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+ gfp_zone(GFP_NOFS), NULL,
+ &zone);
+ if (zone)
+ try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+ GFP_NOFS);
}
}
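free_more_memory() is updated here for the two-zonelist rework: each node keeps one zonelist, so the loop walks online nodes instead of pgdats. A note on the helper it now uses:

	/* first_zones_zonelist(zonelist, highidx, nodemask, &zone) positions a
	 * cursor at the first zone in the zonelist at or below highidx and
	 * stores it through &zone (NULL if the node has no such zone), which
	 * is why the loop above can test 'zone' before kicking reclaim. */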
@@ -2243,6 +2246,8 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
goto out;
BUG_ON(err != len);
err = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
}
/* page covers the boundary, find the boundary offset */
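The added balance_dirty_pages_ratelimited() call matters because cont_expand_zero() can dirty an arbitrarily long run of pages in one loop; throttling once per written page keeps the caller inside the dirty-memory limits. Sketch of the loop shape:

	/* per-iteration shape inside cont_expand_zero():
	 *	write one zeroed page via write_begin/write_end;
	 *	balance_dirty_pages_ratelimited(mapping);	<-- new: throttle here
	 * so a huge expansion cannot outrun writeback. */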
@@ -3180,8 +3185,7 @@ static void recalc_bh_state(void)
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
- set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+ struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
get_cpu_var(bh_accounting).nr++;
diff --git a/fs/dquot.c b/fs/dquot.c
index 41b9dbd68b0e..dfba1623cccb 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -289,7 +289,15 @@ static void wait_on_dquot(struct dquot *dquot)
mutex_unlock(&dquot->dq_lock);
}
-#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
+static inline int dquot_dirty(struct dquot *dquot)
+{
+ return test_bit(DQ_MOD_B, &dquot->dq_flags);
+}
+
+static inline int mark_dquot_dirty(struct dquot *dquot)
+{
+ return dquot->dq_sb->dq_op->mark_dirty(dquot);
+}
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
@@ -1441,31 +1449,43 @@ static inline void set_enable_flags(struct quota_info *dqopt, int type)
switch (type) {
case USRQUOTA:
dqopt->flags |= DQUOT_USR_ENABLED;
+ dqopt->flags &= ~DQUOT_USR_SUSPENDED;
break;
case GRPQUOTA:
dqopt->flags |= DQUOT_GRP_ENABLED;
+ dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
break;
}
}
-static inline void reset_enable_flags(struct quota_info *dqopt, int type)
+static inline void reset_enable_flags(struct quota_info *dqopt, int type,
+ int remount)
{
switch (type) {
case USRQUOTA:
dqopt->flags &= ~DQUOT_USR_ENABLED;
+ if (remount)
+ dqopt->flags |= DQUOT_USR_SUSPENDED;
+ else
+ dqopt->flags &= ~DQUOT_USR_SUSPENDED;
break;
case GRPQUOTA:
dqopt->flags &= ~DQUOT_GRP_ENABLED;
+ if (remount)
+ dqopt->flags |= DQUOT_GRP_SUSPENDED;
+ else
+ dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
break;
}
}
+
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
-int vfs_quota_off(struct super_block *sb, int type)
+int vfs_quota_off(struct super_block *sb, int type, int remount)
{
- int cnt;
+ int cnt, ret = 0;
struct quota_info *dqopt = sb_dqopt(sb);
struct inode *toputinode[MAXQUOTAS];
@@ -1475,9 +1495,17 @@ int vfs_quota_off(struct super_block *sb, int type)
toputinode[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
+ /* If we keep inodes of quota files after remount and quotaoff
+ * is called, drop kept inodes. */
+ if (!remount && sb_has_quota_suspended(sb, cnt)) {
+ iput(dqopt->files[cnt]);
+ dqopt->files[cnt] = NULL;
+ reset_enable_flags(dqopt, cnt, 0);
+ continue;
+ }
if (!sb_has_quota_enabled(sb, cnt))
continue;
- reset_enable_flags(dqopt, cnt);
+ reset_enable_flags(dqopt, cnt, remount);
/* Note: these are blocking operations */
drop_dquot_ref(sb, cnt);
@@ -1493,7 +1521,8 @@ int vfs_quota_off(struct super_block *sb, int type)
put_quota_format(dqopt->info[cnt].dqi_format);
toputinode[cnt] = dqopt->files[cnt];
- dqopt->files[cnt] = NULL;
+ if (!remount)
+ dqopt->files[cnt] = NULL;
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
@@ -1523,12 +1552,19 @@ int vfs_quota_off(struct super_block *sb, int type)
mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
}
- iput(toputinode[cnt]);
mutex_unlock(&dqopt->dqonoff_mutex);
+ /* On remount RO, we keep the inode pointer so that we
+ * can reenable quota on the subsequent remount RW.
+ * But we had better not keep the inode pointer when there
+ * is a pending delete on the quota file... */
+ if (!remount)
+ iput(toputinode[cnt]);
+ else if (!toputinode[cnt]->i_nlink)
+ ret = -EBUSY;
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev);
- return 0;
+ return ret;
}
/*
@@ -1566,7 +1602,8 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
invalidate_bdev(sb->s_bdev);
mutex_lock(&inode->i_mutex);
mutex_lock(&dqopt->dqonoff_mutex);
- if (sb_has_quota_enabled(sb, type)) {
+ if (sb_has_quota_enabled(sb, type) ||
+ sb_has_quota_suspended(sb, type)) {
error = -EBUSY;
goto out_lock;
}
@@ -1589,6 +1626,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
+ dqopt->info[type].dqi_fmt_id = format_id;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
mutex_lock(&dqopt->dqio_mutex);
if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
@@ -1624,12 +1662,41 @@ out_fmt:
return error;
}
+/* Reenable quotas on remount RW */
+static int vfs_quota_on_remount(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct inode *inode;
+ int ret;
+
+ mutex_lock(&dqopt->dqonoff_mutex);
+ if (!sb_has_quota_suspended(sb, type)) {
+ mutex_unlock(&dqopt->dqonoff_mutex);
+ return 0;
+ }
+ BUG_ON(sb_has_quota_enabled(sb, type));
+
+ inode = dqopt->files[type];
+ dqopt->files[type] = NULL;
+ reset_enable_flags(dqopt, type, 0);
+ mutex_unlock(&dqopt->dqonoff_mutex);
+
+ ret = vfs_quota_on_inode(inode, type, dqopt->info[type].dqi_fmt_id);
+ iput(inode);
+
+ return ret;
+}
+
/* Actual function called from quotactl() */
-int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
+int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path,
+ int remount)
{
struct nameidata nd;
int error;
+ if (remount)
+ return vfs_quota_on_remount(sb, type);
+
error = path_lookup(path, LOOKUP_FOLLOW, &nd);
if (error < 0)
return error;
@@ -1709,10 +1776,19 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
}
/* Generic routine for setting common part of quota structure */
-static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
+ struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+
+ if ((di->dqb_valid & QIF_BLIMITS &&
+ (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
+ di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
+ (di->dqb_valid & QIF_ILIMITS &&
+ (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
+ di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+ return -ERANGE;
spin_lock(&dq_data_lock);
if (di->dqb_valid & QIF_SPACE) {
@@ -1744,7 +1820,7 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */
- dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+ dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
if (check_ilim) {
if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
@@ -1752,7 +1828,7 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */
- dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+ dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -1760,21 +1836,24 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dq_data_lock);
mark_dquot_dirty(dquot);
+
+ return 0;
}
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
{
struct dquot *dquot;
+ int rc;
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!(dquot = dqget(sb, id, type))) {
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
- do_set_dqblk(dquot, di);
+ rc = do_set_dqblk(dquot, di);
dqput(dquot);
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
- return 0;
+ return rc;
}
/* Generic routine for getting common part of quota file information */
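Taken together, the dquot.c hunks implement quota suspension across remounts. A condensed view of the state machine they add (flags and entry points are the real ones from this patch):

	/*
	 * remount RO:  vfs_quota_off(sb, type, 1)
	 *		  clears DQUOT_*_ENABLED, sets DQUOT_*_SUSPENDED and
	 *		  keeps dqopt->files[type] pinned for later reuse
	 * remount RW:  vfs_quota_on(sb, type, fmt_id, path, 1)
	 *		  -> vfs_quota_on_remount(): picks up the kept inode,
	 *		  clears SUSPENDED and re-runs vfs_quota_on_inode()
	 *		  with the remembered dqi_fmt_id
	 * quotaoff:    vfs_quota_off(sb, type, 0)
	 *		  drops any kept inode and clears both flags
	 */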
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index e7b2bafa1dd9..10bb02c3f25c 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -106,7 +106,7 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
return 1;
err_out:
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"Invalid block bitmap - "
"block_group = %d, block = %lu",
block_group, bitmap_blk);
@@ -132,7 +132,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
@@ -143,17 +143,18 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
if (bh_submit_read(bh) < 0) {
brelse(bh);
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
return NULL;
}
- if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
- brelse(bh);
- return NULL;
- }
+ ext2_valid_block_bitmap(sb, desc, block_group, bh);
+ /*
+ * the file system was mounted not to panic on errors; continue
+ * with the corrupt bitmap
+ */
return bh;
}
@@ -245,11 +246,10 @@ restart:
prev = rsv;
}
printk("Window map complete.\n");
- if (bad)
- BUG();
+ BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
- __rsv_window_dump((root), (verbose), __FUNCTION__)
+ __rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
@@ -548,7 +548,7 @@ do_more:
for (i = 0, group_freed = 0; i < count; i++) {
if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
bit + i, bitmap_bh->b_data)) {
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"bit already cleared for block %lu", block + i);
} else {
group_freed++;
@@ -1381,7 +1381,12 @@ allocated:
"Allocating block in system zone - "
"blocks from "E2FSBLK", length %lu",
ret_block, num);
- goto out;
+ /*
+ * ext2_try_to_allocate marked the blocks we allocated as in
+ * use. So we may want to selectively mark some of the blocks
+ * as free
+ */
+ goto retry_alloc;
}
performed_allocation = 1;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 8dededd80fe2..a78c6b4af060 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -41,8 +41,8 @@ static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
if (len == (1 << 16))
return cpu_to_le16(EXT2_MAX_REC_LEN);
- else if (len > (1 << 16))
- BUG();
+ else
+ BUG_ON(len > (1 << 16));
return cpu_to_le16(len);
}
@@ -295,11 +295,11 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
struct page *page = ext2_get_page(inode, n);
if (IS_ERR(page)) {
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
- return -EIO;
+ return PTR_ERR(page);
}
kaddr = page_address(page);
if (unlikely(need_revalidate)) {
@@ -314,7 +314,7 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
if (de->rec_len == 0) {
- ext2_error(sb, __FUNCTION__,
+ ext2_error(sb, __func__,
"zero-length directory entry");
ext2_put_page(page);
return -EIO;
@@ -381,7 +381,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
kaddr += ext2_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- ext2_error(dir->i_sb, __FUNCTION__,
+ ext2_error(dir->i_sb, __func__,
"zero-length directory entry");
ext2_put_page(page);
goto out;
@@ -396,7 +396,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
n = 0;
/* next page is past the blocks we've got */
if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
- ext2_error(dir->i_sb, __FUNCTION__,
+ ext2_error(dir->i_sb, __func__,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
(unsigned long long)dir->i_blocks);
@@ -506,7 +506,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
goto got_it;
}
if (de->rec_len == 0) {
- ext2_error(dir->i_sb, __FUNCTION__,
+ ext2_error(dir->i_sb, __func__,
"zero-length directory entry");
err = -EIO;
goto out_unlock;
@@ -578,7 +578,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
while ((char*)de < (char*)dir) {
if (de->rec_len == 0) {
- ext2_error(inode->i_sb, __FUNCTION__,
+ ext2_error(inode->i_sb, __func__,
"zero-length directory entry");
err = -EIO;
goto out;
@@ -670,7 +670,7 @@ int ext2_empty_dir (struct inode * inode)
while ((char *)de <= kaddr) {
if (de->rec_len == 0) {
- ext2_error(inode->i_sb, __FUNCTION__,
+ ext2_error(inode->i_sb, __func__,
"zero-length directory entry");
printk("kaddr=%p, de=%p\n", kaddr, de);
goto not_empty;
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 08f647d8188d..f59741346760 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -75,11 +75,9 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
}
spin_lock(sb_bgl_lock(EXT2_SB(sb), group));
- desc->bg_free_inodes_count =
- cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
+ le16_add_cpu(&desc->bg_free_inodes_count, 1);
if (dir)
- desc->bg_used_dirs_count =
- cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
+ le16_add_cpu(&desc->bg_used_dirs_count, -1);
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
if (dir)
percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
@@ -539,13 +537,11 @@ got:
percpu_counter_inc(&sbi->s_dirs_counter);
spin_lock(sb_bgl_lock(sbi, group));
- gdp->bg_free_inodes_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+ le16_add_cpu(&gdp->bg_free_inodes_count, -1);
if (S_ISDIR(mode)) {
if (sbi->s_debts[group] < 255)
sbi->s_debts[group]++;
- gdp->bg_used_dirs_count =
- cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+ le16_add_cpu(&gdp->bg_used_dirs_count, 1);
} else {
if (sbi->s_debts[group])
sbi->s_debts[group]--;
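The le16_add_cpu() conversions above are behavior-preserving; the helper is shorthand for the open-coded convert-add-convert sequence it replaces. A sketch of the equivalent operation:

	static inline void le16_add_cpu_sketch(__le16 *var, u16 val)
	{
		*var = cpu_to_le16(le16_to_cpu(*var) + val);
	}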
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b8a2990bab83..384fc0d1dd74 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -254,13 +254,13 @@ no_block:
* Caller must make sure that @ind is valid and will stay that way.
*/
-static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
+static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
{
struct ext2_inode_info *ei = EXT2_I(inode);
__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
__le32 *p;
- unsigned long bg_start;
- unsigned long colour;
+ ext2_fsblk_t bg_start;
+ ext2_fsblk_t colour;
/* Try to find previous block */
for (p = ind->p - 1; p >= start; p--)
@@ -275,8 +275,7 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
* It is going to be referred to from the inode itself? OK, just put it into
* the same cylinder group then.
*/
- bg_start = (ei->i_block_group * EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
- le32_to_cpu(EXT2_SB(inode->i_sb)->s_es->s_first_data_block);
+ bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
colour = (current->pid % 16) *
(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
return bg_start + colour;
@@ -291,8 +290,8 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
* Returns preferred place for a block (the goal).
*/
-static inline int ext2_find_goal(struct inode *inode, long block,
- Indirect *partial)
+static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
+ Indirect *partial)
{
struct ext2_block_alloc_info *block_i;
@@ -796,7 +795,7 @@ const struct address_space_operations ext2_aops = {
const struct address_space_operations ext2_aops_xip = {
.bmap = ext2_bmap,
- .get_xip_page = ext2_get_xip_page,
+ .get_xip_mem = ext2_get_xip_mem,
};
const struct address_space_operations ext2_nobh_aops = {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 088b011bb97e..ef50cbc792db 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -51,8 +51,7 @@ void ext2_error (struct super_block * sb, const char * function,
if (!(sb->s_flags & MS_RDONLY)) {
sbi->s_mount_state |= EXT2_ERROR_FS;
- es->s_state =
- cpu_to_le16(le16_to_cpu(es->s_state) | EXT2_ERROR_FS);
+ es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
ext2_sync_super(sb, es);
}
@@ -90,7 +89,7 @@ void ext2_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
return;
- ext2_warning(sb, __FUNCTION__,
+ ext2_warning(sb, __func__,
"updating to rev %d because of new feature flag, "
"running e2fsck is recommended",
EXT2_DYNAMIC_REV);
@@ -604,7 +603,7 @@ static int ext2_setup_super (struct super_block * sb,
"running e2fsck is recommended\n");
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
- es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+ le16_add_cpu(&es->s_mnt_count, 1);
ext2_write_super(sb);
if (test_opt (sb, DEBUG))
printk ("[EXT II FS %s, %s, bs=%lu, fs=%lu, gc=%lu, "
@@ -622,13 +621,13 @@ static int ext2_check_descriptors(struct super_block *sb)
{
int i;
struct ext2_sb_info *sbi = EXT2_SB(sb);
- unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
- unsigned long last_block;
ext2_debug ("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
+ ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
+ ext2_fsblk_t last_block;
if (i == sbi->s_groups_count - 1)
last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
@@ -664,7 +663,6 @@ static int ext2_check_descriptors(struct super_block *sb)
i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
return 0;
}
- first_block += EXT2_BLOCKS_PER_GROUP(sb);
}
return 1;
}
@@ -721,10 +719,9 @@ static unsigned long descriptor_loc(struct super_block *sb,
int nr)
{
struct ext2_sb_info *sbi = EXT2_SB(sb);
- unsigned long bg, first_data_block, first_meta_bg;
+ unsigned long bg, first_meta_bg;
int has_super = 0;
- first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
@@ -733,7 +730,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
bg = sbi->s_desc_per_block * nr;
if (ext2_bg_has_super(sb, bg))
has_super = 1;
- return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
+
+ return ext2_group_first_block_no(sb, bg) + has_super;
}
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
@@ -1062,7 +1060,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
- ext2_warning(sb, __FUNCTION__,
+ ext2_warning(sb, __func__,
"mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
return 0;
@@ -1126,10 +1124,9 @@ void ext2_write_super (struct super_block * sb)
if (!(sb->s_flags & MS_RDONLY)) {
es = EXT2_SB(sb)->s_es;
- if (le16_to_cpu(es->s_state) & EXT2_VALID_FS) {
+ if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug ("setting valid to 0\n");
- es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) &
- ~EXT2_VALID_FS);
+ es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_mtime = cpu_to_le32(get_seconds());
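The s_state rewrites in this file rely on a small endianness identity: for pure bitwise operations the byte-order conversion can be moved onto the constant, since byte swapping commutes with AND/OR/NOT. That is:

	/*	cpu_to_le16(le16_to_cpu(x) & ~F)  ==  x & cpu_to_le16(~F)
	 *	cpu_to_le16(le16_to_cpu(x) |  F)  ==  x | cpu_to_le16(F)
	 * so the on-disk __le16 is manipulated in place and only the
	 * compile-time constant gets swapped. */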
@@ -1180,7 +1177,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
(old_mount_opt & EXT2_MOUNT_XIP)) &&
invalidate_inodes(sb))
- ext2_warning(sb, __FUNCTION__, "busy inodes while remounting "\
+ ext2_warning(sb, __func__, "busy inodes while remounting "\
"xip remain in cache (no functional problem)");
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index a99d46f3b26e..987a5261cc2e 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -646,8 +646,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
unlock_buffer(new_bh);
goto cleanup;
}
- HDR(new_bh)->h_refcount = cpu_to_le32(1 +
- le32_to_cpu(HDR(new_bh)->h_refcount));
+ le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
ea_bdebug(new_bh, "refcount now=%d",
le32_to_cpu(HDR(new_bh)->h_refcount));
}
@@ -660,10 +659,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
ext2_xattr_cache_insert(new_bh);
} else {
/* We need to allocate a new block */
- int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
- s_first_data_block) +
- EXT2_I(inode)->i_block_group *
- EXT2_BLOCKS_PER_GROUP(sb);
+ ext2_fsblk_t goal = ext2_group_first_block_no(sb,
+ EXT2_I(inode)->i_block_group);
int block = ext2_new_block(inode, goal, &error);
if (error)
goto cleanup;
@@ -731,8 +728,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
bforget(old_bh);
} else {
/* Decrement the refcount only. */
- HDR(old_bh)->h_refcount = cpu_to_le32(
- le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
+ le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
DQUOT_FREE_BLOCK(inode, 1);
@@ -789,8 +785,7 @@ ext2_xattr_delete_inode(struct inode *inode)
bforget(bh);
unlock_buffer(bh);
} else {
- HDR(bh)->h_refcount = cpu_to_le32(
- le32_to_cpu(HDR(bh)->h_refcount) - 1);
+ le32_add_cpu(&HDR(bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
ea_bdebug(bh, "refcount now=%d",
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index ca7f00312388..4fb94c20041b 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -15,24 +15,28 @@
#include "xip.h"
static inline int
-__inode_direct_access(struct inode *inode, sector_t sector,
- unsigned long *data)
+__inode_direct_access(struct inode *inode, sector_t block,
+ void **kaddr, unsigned long *pfn)
{
- BUG_ON(!inode->i_sb->s_bdev->bd_disk->fops->direct_access);
- return inode->i_sb->s_bdev->bd_disk->fops
- ->direct_access(inode->i_sb->s_bdev,sector,data);
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ struct block_device_operations *ops = bdev->bd_disk->fops;
+ sector_t sector;
+
+ sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */
+
+ BUG_ON(!ops->direct_access);
+ return ops->direct_access(bdev, sector, kaddr, pfn);
}
static inline int
-__ext2_get_sector(struct inode *inode, sector_t offset, int create,
+__ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
sector_t *result)
{
struct buffer_head tmp;
int rc;
memset(&tmp, 0, sizeof(struct buffer_head));
- rc = ext2_get_block(inode, offset/ (PAGE_SIZE/512), &tmp,
- create);
+ rc = ext2_get_block(inode, pgoff, &tmp, create);
*result = tmp.b_blocknr;
/* did we get a sparse block (hole in the file)? */
@@ -45,15 +49,15 @@ __ext2_get_sector(struct inode *inode, sector_t offset, int create,
}
int
-ext2_clear_xip_target(struct inode *inode, int block)
+ext2_clear_xip_target(struct inode *inode, sector_t block)
{
- sector_t sector = block * (PAGE_SIZE/512);
- unsigned long data;
+ void *kaddr;
+ unsigned long pfn;
int rc;
- rc = __inode_direct_access(inode, sector, &data);
+ rc = __inode_direct_access(inode, block, &kaddr, &pfn);
if (!rc)
- clear_page((void*)data);
+ clear_page(kaddr);
return rc;
}
@@ -64,30 +68,23 @@ void ext2_xip_verify_sb(struct super_block *sb)
if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
!sb->s_bdev->bd_disk->fops->direct_access) {
sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
- ext2_warning(sb, __FUNCTION__,
+ ext2_warning(sb, __func__,
"ignoring xip option - not supported by bdev");
}
}
-struct page *
-ext2_get_xip_page(struct address_space *mapping, sector_t offset,
- int create)
+int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create,
+ void **kmem, unsigned long *pfn)
{
int rc;
- unsigned long data;
- sector_t sector;
+ sector_t block;
/* first, retrieve the sector number */
- rc = __ext2_get_sector(mapping->host, offset, create, &sector);
+ rc = __ext2_get_block(mapping->host, pgoff, create, &block);
if (rc)
- goto error;
+ return rc;
/* retrieve address of the target data */
- rc = __inode_direct_access
- (mapping->host, sector * (PAGE_SIZE/512), &data);
- if (!rc)
- return virt_to_page(data);
-
- error:
- return ERR_PTR(rc);
+ rc = __inode_direct_access(mapping->host, block, kmem, pfn);
+ return rc;
}
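The rewritten XIP hook changes what callers get back: instead of a struct page derived from a kernel virtual address, get_xip_mem() hands out the kernel mapping and the pfn straight from the block driver's ->direct_access(). A hypothetical caller, matching the signature declared in xip.h below:

	static int read_xip_sketch(struct address_space *mapping, pgoff_t pgoff,
				   void *buf, size_t len)
	{
		void *kmem;
		unsigned long pfn;
		int rc;

		rc = mapping->a_ops->get_xip_mem(mapping, pgoff, 0, &kmem, &pfn);
		if (!rc)
			memcpy(buf, kmem, len);	/* direct copy, no struct page */
		return rc;
	}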
diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h
index aa85331d6c56..18b34d2f31b3 100644
--- a/fs/ext2/xip.h
+++ b/fs/ext2/xip.h
@@ -7,19 +7,20 @@
#ifdef CONFIG_EXT2_FS_XIP
extern void ext2_xip_verify_sb (struct super_block *);
-extern int ext2_clear_xip_target (struct inode *, int);
+extern int ext2_clear_xip_target (struct inode *, sector_t);
static inline int ext2_use_xip (struct super_block *sb)
{
struct ext2_sb_info *sbi = EXT2_SB(sb);
return (sbi->s_mount_opt & EXT2_MOUNT_XIP);
}
-struct page* ext2_get_xip_page (struct address_space *, sector_t, int);
-#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_page)
+int ext2_get_xip_mem(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
+#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_mem)
#else
#define mapping_is_xip(map) 0
#define ext2_xip_verify_sb(sb) do { } while (0)
#define ext2_use_xip(sb) 0
#define ext2_clear_xip_target(inode, chain) 0
-#define ext2_get_xip_page NULL
+#define ext2_get_xip_mem NULL
#endif
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index da0cb2c0e437..92fd0338a6eb 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -117,7 +117,7 @@ static int ext3_valid_block_bitmap(struct super_block *sb,
return 1;
err_out:
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"Invalid block bitmap - "
"block_group = %d, block = %lu",
block_group, bitmap_blk);
@@ -147,7 +147,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
@@ -158,16 +158,17 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
if (bh_submit_read(bh) < 0) {
brelse(bh);
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
return NULL;
}
- if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) {
- brelse(bh);
- return NULL;
- }
+ ext3_valid_block_bitmap(sb, desc, block_group, bh);
+ /*
+ * the file system was mounted not to panic on errors; continue
+ * with the corrupt bitmap
+ */
return bh;
}
/*
@@ -232,11 +233,10 @@ restart:
prev = rsv;
}
printk("Window map complete.\n");
- if (bad)
- BUG();
+ BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
- __rsv_window_dump((root), (verbose), __FUNCTION__)
+ __rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
@@ -618,7 +618,7 @@ do_more:
if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
bit + i, bitmap_bh->b_data)) {
jbd_unlock_bh_state(bitmap_bh);
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"bit already cleared for block "E3FSBLK,
block + i);
jbd_lock_bh_state(bitmap_bh);
@@ -1642,7 +1642,11 @@ allocated:
"Allocating block in system zone - "
"blocks from "E3FSBLK", length %lu",
ret_block, num);
- goto out;
+ /*
+ * claim_block() marked the blocks we allocated as in use. So we
+ * may want to selectively mark some of the blocks as free.
+ */
+ goto retry_alloc;
}
performed_allocation = 1;
@@ -1668,7 +1672,7 @@ allocated:
if (ext3_test_bit(grp_alloc_blk+i,
bh2jh(bitmap_bh)->b_committed_data)) {
printk("%s: block was unexpectedly set in "
- "b_committed_data\n", __FUNCTION__);
+ "b_committed_data\n", __func__);
}
}
}
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
index e1f91fd26a93..d401f148d74d 100644
--- a/fs/ext3/ext3_jbd.c
+++ b/fs/ext3/ext3_jbd.c
@@ -9,7 +9,7 @@ int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
{
int err = journal_get_undo_access(handle, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
@@ -18,7 +18,7 @@ int __ext3_journal_get_write_access(const char *where, handle_t *handle,
{
int err = journal_get_write_access(handle, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
@@ -27,7 +27,7 @@ int __ext3_journal_forget(const char *where, handle_t *handle,
{
int err = journal_forget(handle, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
@@ -36,7 +36,7 @@ int __ext3_journal_revoke(const char *where, handle_t *handle,
{
int err = journal_revoke(handle, blocknr, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
@@ -45,7 +45,7 @@ int __ext3_journal_get_create_access(const char *where,
{
int err = journal_get_create_access(handle, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
@@ -54,6 +54,6 @@ int __ext3_journal_dirty_metadata(const char *where,
{
int err = journal_dirty_metadata(handle, bh);
if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+ ext3_journal_abort_handle(where, __func__, bh, handle,err);
return err;
}
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index a588e23841d4..d33634119e17 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -72,6 +72,9 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
goto out;
}
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ goto out;
+
/*
* The VFS has written the file data. If the inode is unaltered
* then we need not start a commit.
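The datasync shortcut added above leans on the VFS dirty-state split, summarized below.

	/* fdatasync() only has to flush metadata needed to retrieve the data.
	 * Changes that do not affect that (e.g. timestamp updates) mark the
	 * inode I_DIRTY_SYNC but not I_DIRTY_DATASYNC, so when only the
	 * former is set the journal commit can be skipped entirely. */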
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 96dd5573e49b..77126821b2e9 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -644,7 +644,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"bad orphan ino %lu! e2fsck was run?", ino);
goto error;
}
@@ -653,7 +653,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"inode bitmap error for orphan %lu", ino);
goto error;
}
@@ -678,7 +678,7 @@ iget_failed:
err = PTR_ERR(inode);
inode = NULL;
bad_orphan:
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"bad orphan inode %lu! e2fsck was run?", ino);
printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
bit, (unsigned long long)bitmap_bh->b_blocknr,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index c683609b0e3a..cc47b76091bf 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -95,7 +95,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
BUFFER_TRACE(bh, "call ext3_journal_revoke");
err = ext3_journal_revoke(handle, blocknr, bh);
if (err)
- ext3_abort(inode->i_sb, __FUNCTION__,
+ ext3_abort(inode->i_sb, __func__,
"error %d when attempting revoke", err);
BUFFER_TRACE(bh, "exit");
return err;
@@ -1190,7 +1190,7 @@ int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
int err = journal_dirty_data(handle, bh);
if (err)
- ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
+ ext3_journal_abort_handle(__func__, __func__,
bh, handle, err);
return err;
}
@@ -2454,11 +2454,10 @@ out_stop:
static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
unsigned long ino, struct ext3_iloc *iloc)
{
- unsigned long desc, group_desc, block_group;
+ unsigned long block_group;
unsigned long offset;
ext3_fsblk_t block;
- struct buffer_head *bh;
- struct ext3_group_desc * gdp;
+ struct ext3_group_desc *gdp;
if (!ext3_valid_inum(sb, ino)) {
/*
@@ -2470,27 +2469,15 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
}
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
- if (block_group >= EXT3_SB(sb)->s_groups_count) {
- ext3_error(sb,"ext3_get_inode_block","group >= groups count");
- return 0;
- }
- smp_rmb();
- group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
- desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
- bh = EXT3_SB(sb)->s_group_desc[group_desc];
- if (!bh) {
- ext3_error (sb, "ext3_get_inode_block",
- "Descriptor not loaded");
+ gdp = ext3_get_group_desc(sb, block_group, NULL);
+ if (!gdp)
return 0;
- }
-
- gdp = (struct ext3_group_desc *)bh->b_data;
/*
* Figure out the offset within the block group inode table
*/
offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
EXT3_INODE_SIZE(sb);
- block = le32_to_cpu(gdp[desc].bg_inode_table) +
+ block = le32_to_cpu(gdp->bg_inode_table) +
(offset >> EXT3_BLOCK_SIZE_BITS(sb));
iloc->block_group = block_group;
@@ -3214,7 +3201,7 @@ void ext3_dirty_inode(struct inode *inode)
current_handle->h_transaction != handle->h_transaction) {
/* This task has a transaction open against a different fs */
printk(KERN_EMERG "%s: transactions do not match!\n",
- __FUNCTION__);
+ __func__);
} else {
jbd_debug(5, "marking dirty. outer handle=%p\n",
current_handle);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index dec3e0d88ab1..0b8cf80154f1 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -57,10 +57,15 @@ static struct buffer_head *ext3_append(handle_t *handle,
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
- if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
+ bh = ext3_bread(handle, inode, *block, 1, err);
+ if (bh) {
inode->i_size += inode->i_sb->s_blocksize;
EXT3_I(inode)->i_disksize = inode->i_size;
- ext3_journal_get_write_access(handle,bh);
+ *err = ext3_journal_get_write_access(handle, bh);
+ if (*err) {
+ brelse(bh);
+ bh = NULL;
+ }
}
return bh;
}
@@ -356,7 +361,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"Unrecognised inode hash code %d",
root->info.hash_version);
brelse(bh);
@@ -370,7 +375,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"Unimplemented inode hash flags: %#06x",
root->info.unused_flags);
brelse(bh);
@@ -379,7 +384,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
}
if ((indirect = root->info.indirect_levels) > 1) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"Unimplemented inode hash depth: %#06x",
root->info.indirect_levels);
brelse(bh);
@@ -392,7 +397,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"dx entry: limit != root limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -404,7 +409,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
{
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"dx entry: no count or count > limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -449,7 +454,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"dx entry: limit != node limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
@@ -465,7 +470,7 @@ fail2:
}
fail:
if (*err == ERR_BAD_DX_DIR)
- ext3_warning(dir->i_sb, __FUNCTION__,
+ ext3_warning(dir->i_sb, __func__,
"Corrupt dir inode %ld, running e2fsck is "
"recommended.", dir->i_ino);
return NULL;
@@ -913,7 +918,7 @@ restart:
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
- ext3_error(sb, __FUNCTION__, "reading directory #%lu "
+ ext3_error(sb, __func__, "reading directory #%lu "
"offset %lu", dir->i_ino, block);
brelse(bh);
goto next;
@@ -1005,7 +1010,7 @@ static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
retval = ext3_htree_next_block(dir, hash, frame,
frames, NULL);
if (retval < 0) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"error reading index page in directory #%lu",
dir->i_ino);
*err = retval;
@@ -1530,7 +1535,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Directory index full!");
err = -ENOSPC;
goto cleanup;
@@ -1832,11 +1837,11 @@ static int empty_dir (struct inode * inode)
if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
!(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
if (err)
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
else
- ext3_warning(inode->i_sb, __FUNCTION__,
+ ext3_warning(inode->i_sb, __func__,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
@@ -1865,7 +1870,7 @@ static int empty_dir (struct inode * inode)
offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
if (err)
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"error %d reading directory"
" #%lu offset %lu",
err, inode->i_ino, offset);
@@ -2318,6 +2323,8 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
EXT3_FEATURE_INCOMPAT_FILETYPE))
new_de->file_type = old_de->file_type;
new_dir->i_version++;
+ new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
+ ext3_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle, new_bh);
brelse(new_bh);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 0e97b6e07cb0..28cfd0b40527 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -48,60 +48,60 @@ static int verify_group_input(struct super_block *sb,
free_blocks_count, input->reserved_blocks);
if (group != sbi->s_groups_count)
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Cannot add at group %u (only %lu groups)",
input->group, sbi->s_groups_count);
else if ((start - le32_to_cpu(es->s_first_data_block)) %
EXT3_BLOCKS_PER_GROUP(sb))
- ext3_warning(sb, __FUNCTION__, "Last group not full");
+ ext3_warning(sb, __func__, "Last group not full");
else if (input->reserved_blocks > input->blocks_count / 5)
- ext3_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+ ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
input->reserved_blocks);
else if (free_blocks_count < 0)
- ext3_warning(sb, __FUNCTION__, "Bad blocks count %u",
+ ext3_warning(sb, __func__, "Bad blocks count %u",
input->blocks_count);
else if (!(bh = sb_bread(sb, end - 1)))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Cannot read last block ("E3FSBLK")",
end - 1);
else if (outside(input->block_bitmap, start, end))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Block bitmap not in group (block %u)",
input->block_bitmap);
else if (outside(input->inode_bitmap, start, end))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Inode bitmap not in group (block %u)",
input->inode_bitmap);
else if (outside(input->inode_table, start, end) ||
outside(itend - 1, start, end))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Inode table not in group (blocks %u-"E3FSBLK")",
input->inode_table, itend - 1);
else if (input->inode_bitmap == input->block_bitmap)
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Block bitmap same as inode bitmap (%u)",
input->block_bitmap);
else if (inside(input->block_bitmap, input->inode_table, itend))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Block bitmap (%u) in inode table (%u-"E3FSBLK")",
input->block_bitmap, input->inode_table, itend-1);
else if (inside(input->inode_bitmap, input->inode_table, itend))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
input->inode_bitmap, input->inode_table, itend-1);
else if (inside(input->block_bitmap, start, metaend))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Block bitmap (%u) in GDT table"
" ("E3FSBLK"-"E3FSBLK")",
input->block_bitmap, start, metaend - 1);
else if (inside(input->inode_bitmap, start, metaend))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Inode bitmap (%u) in GDT table"
" ("E3FSBLK"-"E3FSBLK")",
input->inode_bitmap, start, metaend - 1);
else if (inside(input->inode_table, start, metaend) ||
inside(itend - 1, start, metaend))
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Inode table (%u-"E3FSBLK") overlaps"
"GDT table ("E3FSBLK"-"E3FSBLK")",
input->inode_table, itend - 1, start, metaend - 1);
@@ -386,7 +386,7 @@ static int verify_reserved_gdb(struct super_block *sb,
while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"reserved GDT "E3FSBLK
" missing grp %d ("E3FSBLK")",
blk, grp,
@@ -440,7 +440,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
*/
if (EXT3_SB(sb)->s_sbh->b_blocknr !=
le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"won't resize using backup superblock at %llu",
(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
return -EPERM;
@@ -464,7 +464,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
data = (__le32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"new group %u GDT block "E3FSBLK" not reserved",
input->group, gdblock);
err = -EINVAL;
@@ -488,7 +488,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
GFP_NOFS);
if (!n_group_desc) {
err = -ENOMEM;
- ext3_warning (sb, __FUNCTION__,
+ ext3_warning (sb, __func__,
"not enough memory for %lu groups", gdb_num + 1);
goto exit_inode;
}
@@ -586,7 +586,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
/* Get each reserved primary GDT block and verify it holds backups */
for (res = 0; res < reserved_gdb; res++, blk++) {
if (le32_to_cpu(*data) != blk) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"reserved block "E3FSBLK
" not at offset %ld",
blk,
@@ -730,7 +730,7 @@ static void update_backups(struct super_block *sb,
*/
exit_err:
if (err) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"can't update backup for group %d (err %d), "
"forcing fsck on next reboot", group, err);
sbi->s_mount_state &= ~EXT3_VALID_FS;
@@ -770,33 +770,33 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Can't resize non-sparse filesystem further");
return -EPERM;
}
if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
le32_to_cpu(es->s_blocks_count)) {
- ext3_warning(sb, __FUNCTION__, "blocks_count overflow\n");
+ ext3_warning(sb, __func__, "blocks_count overflow\n");
return -EINVAL;
}
if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
le32_to_cpu(es->s_inodes_count)) {
- ext3_warning(sb, __FUNCTION__, "inodes_count overflow\n");
+ ext3_warning(sb, __func__, "inodes_count overflow\n");
return -EINVAL;
}
if (reserved_gdb || gdb_off == 0) {
if (!EXT3_HAS_COMPAT_FEATURE(sb,
EXT3_FEATURE_COMPAT_RESIZE_INODE)){
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = ext3_iget(sb, EXT3_RESIZE_INO);
if (IS_ERR(inode)) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"Error opening resize inode");
return PTR_ERR(inode);
}
@@ -825,7 +825,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
lock_super(sb);
if (input->group != sbi->s_groups_count) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
@@ -988,13 +988,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
" too large to resize to %lu blocks safely\n",
sb->s_id, n_blocks_count);
if (sizeof(sector_t) < 8)
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"CONFIG_LBD not enabled\n");
return -EINVAL;
}
if (n_blocks_count < o_blocks_count) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"can't shrink FS - resize aborted");
return -EBUSY;
}
@@ -1004,7 +1004,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
EXT3_BLOCKS_PER_GROUP(sb);
if (last == 0) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"need to use ext2online to resize further");
return -EPERM;
}
@@ -1012,7 +1012,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
add = EXT3_BLOCKS_PER_GROUP(sb) - last;
if (o_blocks_count + add < o_blocks_count) {
- ext3_warning(sb, __FUNCTION__, "blocks_count overflow");
+ ext3_warning(sb, __func__, "blocks_count overflow");
return -EINVAL;
}
@@ -1020,7 +1020,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
add = n_blocks_count - o_blocks_count;
if (o_blocks_count + add < n_blocks_count)
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"will only finish group ("E3FSBLK
" blocks, %u new)",
o_blocks_count + add, add);
@@ -1028,7 +1028,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
/* See if the device is actually as big as what was requested */
bh = sb_bread(sb, o_blocks_count + add -1);
if (!bh) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"can't read last block, resize aborted");
return -ENOSPC;
}
@@ -1040,22 +1040,23 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
handle = ext3_journal_start_sb(sb, 3);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- ext3_warning(sb, __FUNCTION__, "error %d on journal start",err);
+ ext3_warning(sb, __func__, "error %d on journal start",err);
goto exit_put;
}
lock_super(sb);
if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"multiple resizers run on filesystem!");
unlock_super(sb);
+ ext3_journal_stop(handle);
err = -EBUSY;
goto exit_put;
}
if ((err = ext3_journal_get_write_access(handle,
EXT3_SB(sb)->s_sbh))) {
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"error %d on journal write access", err);
unlock_super(sb);
ext3_journal_stop(handle);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ad5360664082..fe3119a71ada 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -84,7 +84,7 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
* take the FS itself readonly cleanly. */
journal = EXT3_SB(sb)->s_journal;
if (is_journal_aborted(journal)) {
- ext3_abort(sb, __FUNCTION__,
+ ext3_abort(sb, __func__,
"Detected aborted journal");
return ERR_PTR(-EROFS);
}
@@ -304,7 +304,7 @@ void ext3_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
return;
- ext3_warning(sb, __FUNCTION__,
+ ext3_warning(sb, __func__,
"updating to rev %d because of new feature flag, "
"running e2fsck is recommended",
EXT3_DYNAMIC_REV);
@@ -685,7 +685,8 @@ static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
-static int ext3_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext3_quota_on(struct super_block *sb, int type, int format_id,
+ char *path, int remount);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -1096,6 +1097,9 @@ clear_qf_name:
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
+ printk(KERN_ERR
+ "EXT3-fs: quota options not supported.\n");
+ break;
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_offusrjquota:
@@ -1103,7 +1107,7 @@ clear_qf_name:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
printk(KERN_ERR
- "EXT3-fs: journalled quota options not "
+ "EXT3-fs: journaled quota options not "
"supported.\n");
break;
case Opt_noquota:
@@ -1218,7 +1222,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
inconsistencies, to force a fsck at reboot. But for
a plain journaled filesystem we can keep it set as
valid forever! :) */
- es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT3_VALID_FS);
+ es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
@@ -1253,14 +1257,14 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
static int ext3_check_descriptors(struct super_block *sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
- ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
- ext3_fsblk_t last_block;
int i;
ext3_debug ("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
+ ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
+ ext3_fsblk_t last_block;
if (i == sbi->s_groups_count - 1)
last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
@@ -1299,7 +1303,6 @@ static int ext3_check_descriptors(struct super_block *sb)
le32_to_cpu(gdp->bg_inode_table));
return 0;
}
- first_block += EXT3_BLOCKS_PER_GROUP(sb);
}
sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
@@ -1387,7 +1390,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
- __FUNCTION__, inode->i_ino, inode->i_size);
+ __func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
inode->i_ino, inode->i_size);
ext3_truncate(inode);
@@ -1395,7 +1398,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
} else {
printk(KERN_DEBUG
"%s: deleting unreferenced inode %lu\n",
- __FUNCTION__, inode->i_ino);
+ __func__, inode->i_ino);
jbd_debug(2, "deleting unreferenced inode %lu\n",
inode->i_ino);
nr_orphans++;
@@ -1415,7 +1418,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
- vfs_quota_off(sb, i);
+ vfs_quota_off(sb, i, 0);
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2298,9 +2301,9 @@ static void ext3_clear_journal_err(struct super_block * sb,
char nbuf[16];
errstr = ext3_decode_error(sb, j_errno, nbuf);
- ext3_warning(sb, __FUNCTION__, "Filesystem error recorded "
+ ext3_warning(sb, __func__, "Filesystem error recorded "
"from previous mount: %s", errstr);
- ext3_warning(sb, __FUNCTION__, "Marking fs in need of "
+ ext3_warning(sb, __func__, "Marking fs in need of "
"filesystem check.");
EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
@@ -2427,7 +2430,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
}
if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
- ext3_abort(sb, __FUNCTION__, "Abort forced by user");
+ ext3_abort(sb, __func__, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -2639,8 +2642,14 @@ static int ext3_dquot_drop(struct inode *inode)
/* We may delete quota structure so we need to reserve enough blocks */
handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ /*
+ * We call dquot_drop() anyway to at least release references
+ * to quota structures so that umount does not hang.
+ */
+ dquot_drop(inode);
return PTR_ERR(handle);
+ }
ret = dquot_drop(inode);
err = ext3_journal_stop(handle);
if (!ret)
@@ -2743,17 +2752,17 @@ static int ext3_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
- char *path)
+ char *path, int remount)
{
int err;
struct nameidata nd;
if (!test_opt(sb, QUOTA))
return -EINVAL;
- /* Not journalling quota? */
- if (!EXT3_SB(sb)->s_qf_names[USRQUOTA] &&
- !EXT3_SB(sb)->s_qf_names[GRPQUOTA])
- return vfs_quota_on(sb, type, format_id, path);
+ /* Not journalling quota or remount? */
+ if ((!EXT3_SB(sb)->s_qf_names[USRQUOTA] &&
+ !EXT3_SB(sb)->s_qf_names[GRPQUOTA]) || remount)
+ return vfs_quota_on(sb, type, format_id, path, remount);
err = path_lookup(path, LOOKUP_FOLLOW, &nd);
if (err)
return err;
@@ -2762,13 +2771,13 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
path_put(&nd.path);
return -EXDEV;
}
- /* Quotafile not of fs root? */
+ /* Quotafile not in fs root? */
if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
printk(KERN_WARNING
"EXT3-fs: Quota file not on filesystem root. "
"Journalled quota will not work.\n");
path_put(&nd.path);
- return vfs_quota_on(sb, type, format_id, path);
+ return vfs_quota_on(sb, type, format_id, path, remount);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
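The ext3 (and, below, ext4 and reiserfs) hunks all thread a new `remount` flag through the ->quota_on() path so that quotas can be re-enabled after a remount, where the path and format_id arguments are meaningless. A minimal user-space sketch of that dispatch decision — the helpers are stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for vfs_quota_on(); just records how it was called. */
    static int vfs_quota_on_stub(int type, const char *path, int remount)
    {
            printf("vfs_quota_on(type=%d, path=%s, remount=%d)\n",
                   type, path ? path : "(none)", remount);
            return 0;
    }

    /* Mirrors the patched ext3_quota_on() logic: skip all path sanity
     * checks when quota is not journaled, or when we are merely
     * re-enabling quotas after remount (path/format_id are bogus then). */
    static int quota_on_sketch(bool journaled, int type, const char *path,
                               int remount)
    {
            if (!journaled || remount)
                    return vfs_quota_on_stub(type, path, remount);
            /* journaled case: the kernel validates the quota file here
             * (same filesystem, located in the fs root) before delegating */
            return vfs_quota_on_stub(type, path, remount);
    }

    int main(void)
    {
            quota_on_sketch(true, 0, "/aquota.user", 0); /* normal quotaon */
            quota_on_sketch(true, 0, NULL, 1);           /* after remount rw */
            return 0;
    }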
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 42856541e9a5..d4a4f0e9ff69 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -99,6 +99,8 @@ static struct buffer_head *ext3_xattr_cache_find(struct inode *,
struct mb_cache_entry **);
static void ext3_xattr_rehash(struct ext3_xattr_header *,
struct ext3_xattr_entry *);
+static int ext3_xattr_list(struct inode *inode, char *buffer,
+ size_t buffer_size);
static struct mb_cache *ext3_xattr_cache;
@@ -232,7 +234,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext3_xattr_check_block(bh)) {
-bad_block: ext3_error(inode->i_sb, __FUNCTION__,
+bad_block: ext3_error(inode->i_sb, __func__,
"inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
@@ -374,7 +376,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext3_xattr_check_block(bh)) {
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
@@ -427,7 +429,7 @@ cleanup:
* Returns a negative error number on failure, or the number of bytes
* used / required on success.
*/
-int
+static int
ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
int i_error, b_error;
@@ -649,7 +651,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext3_xattr_check_block(bs->bh)) {
- ext3_error(sb, __FUNCTION__,
+ ext3_error(sb, __func__,
"inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
error = -EIO;
@@ -797,10 +799,8 @@ inserted:
get_bh(new_bh);
} else {
/* We need to allocate a new block */
- ext3_fsblk_t goal = le32_to_cpu(
- EXT3_SB(sb)->s_es->s_first_data_block) +
- (ext3_fsblk_t)EXT3_I(inode)->i_block_group *
- EXT3_BLOCKS_PER_GROUP(sb);
+ ext3_fsblk_t goal = ext3_group_first_block_no(sb,
+ EXT3_I(inode)->i_block_group);
ext3_fsblk_t block = ext3_new_block(handle, inode,
goal, &error);
if (error)
@@ -852,7 +852,7 @@ cleanup_dquot:
goto cleanup;
bad_block:
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
@@ -1081,14 +1081,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
goto cleanup;
bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
if (!bh) {
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"inode %lu: block "E3FSBLK" read error", inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
}
if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) {
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"inode %lu: bad block "E3FSBLK, inode->i_ino,
EXT3_I(inode)->i_file_acl);
goto cleanup;
@@ -1215,7 +1215,7 @@ again:
}
bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) {
- ext3_error(inode->i_sb, __FUNCTION__,
+ ext3_error(inode->i_sb, __func__,
"inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
index 6b1ae1c6182c..148a4dfc82ab 100644
--- a/fs/ext3/xattr.h
+++ b/fs/ext3/xattr.h
@@ -67,7 +67,6 @@ extern struct xattr_handler ext3_xattr_security_handler;
extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext3_xattr_list(struct inode *, char *, size_t);
extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
@@ -89,12 +88,6 @@ ext3_xattr_get(struct inode *inode, int name_index, const char *name,
}
static inline int
-ext3_xattr_list(struct inode *inode, void *buffer, size_t size)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
ext3_xattr_set(struct inode *inode, int name_index, const char *name,
const void *value, size_t size, int flags)
{
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 13383ba18f1d..c81a8e759bad 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -813,7 +813,8 @@ static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
-static int ext4_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ char *path, int remount);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -1632,7 +1633,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
- vfs_quota_off(sb, i);
+ vfs_quota_off(sb, i, 0);
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -3143,7 +3144,7 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- char *path)
+ char *path, int remount)
{
int err;
struct nameidata nd;
@@ -3151,9 +3152,9 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (!test_opt(sb, QUOTA))
return -EINVAL;
/* Not journalling quota? */
- if (!EXT4_SB(sb)->s_qf_names[USRQUOTA] &&
- !EXT4_SB(sb)->s_qf_names[GRPQUOTA])
- return vfs_quota_on(sb, type, format_id, path);
+ if ((!EXT4_SB(sb)->s_qf_names[USRQUOTA] &&
+ !EXT4_SB(sb)->s_qf_names[GRPQUOTA]) || remount)
+ return vfs_quota_on(sb, type, format_id, path, remount);
err = path_lookup(path, LOOKUP_FOLLOW, &nd);
if (err)
return err;
@@ -3168,7 +3169,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
"EXT4-fs: Quota file not on filesystem root. "
"Journalled quota will not work.\n");
path_put(&nd.path);
- return vfs_quota_on(sb, type, format_id, path);
+ return vfs_quota_on(sb, type, format_id, path, remount);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 72cbcd61bd95..486725ee99ae 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -124,8 +124,8 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos,
* but ignore that right now.
* Ahem... Stack smashing in ring 0 isn't fun. Fixed.
*/
-static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
- struct nls_table *nls)
+static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int len,
+ int uni_xlate, struct nls_table *nls)
{
wchar_t *ip, ec;
unsigned char *op, nc;
@@ -135,10 +135,11 @@ static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
ip = uni;
op = ascii;
- while (*ip) {
+ while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) {
ec = *ip++;
if ( (charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
op += charlen;
+ len -= charlen;
} else {
if (uni_xlate == 1) {
*op = ':';
@@ -149,16 +150,19 @@ static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
ec >>= 4;
}
op += 5;
+ len -= 5;
} else {
*op++ = '?';
+ len--;
}
}
- /* We have some slack there, so it's OK */
- if (op>ascii+256) {
- op = ascii + 256;
- break;
- }
}
+
+ if (unlikely(*ip)) {
+ printk(KERN_WARNING "FAT: filename was truncated while "
+ "converting.");
+ }
+
*op = 0;
return (op - ascii);
}
@@ -243,7 +247,7 @@ static int fat_parse_long(struct inode *dir, loff_t *pos,
unsigned char id, slot, slots, alias_checksum;
if (!*unicode) {
- *unicode = (wchar_t *)__get_free_page(GFP_KERNEL);
+ *unicode = __getname();
if (!*unicode) {
brelse(*bh);
return -ENOMEM;
@@ -311,9 +315,11 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
struct nls_table *nls_io = sbi->nls_io;
struct nls_table *nls_disk = sbi->nls_disk;
wchar_t bufuname[14];
- unsigned char xlate_len, nr_slots;
+ unsigned char nr_slots;
+ int xlate_len;
wchar_t *unicode = NULL;
- unsigned char work[MSDOS_NAME], bufname[260]; /* 256 + 4 */
+ unsigned char work[MSDOS_NAME];
+ unsigned char *bufname = NULL;
int uni_xlate = sbi->options.unicode_xlate;
int utf8 = sbi->options.utf8;
int anycase = (sbi->options.name_check != 's');
@@ -321,6 +327,10 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
loff_t cpos = 0;
int chl, i, j, last_u, err;
+ bufname = __getname();
+ if (!bufname)
+ return -ENOMEM;
+
err = -ENOENT;
while(1) {
if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
@@ -386,8 +396,8 @@ parse_record:
bufuname[last_u] = 0x0000;
xlate_len = utf8
- ?utf8_wcstombs(bufname, bufuname, sizeof(bufname))
- :uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+ ?utf8_wcstombs(bufname, bufuname, PATH_MAX)
+ :uni16_to_x8(bufname, bufuname, PATH_MAX, uni_xlate, nls_io);
if (xlate_len == name_len)
if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
(anycase && !nls_strnicmp(nls_io, name, bufname,
@@ -396,8 +406,8 @@ parse_record:
if (nr_slots) {
xlate_len = utf8
- ?utf8_wcstombs(bufname, unicode, sizeof(bufname))
- :uni16_to_x8(bufname, unicode, uni_xlate, nls_io);
+ ?utf8_wcstombs(bufname, unicode, PATH_MAX)
+ :uni16_to_x8(bufname, unicode, PATH_MAX, uni_xlate, nls_io);
if (xlate_len != name_len)
continue;
if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
@@ -416,8 +426,10 @@ Found:
sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
err = 0;
EODir:
+ if (bufname)
+ __putname(bufname);
if (unicode)
- free_page((unsigned long)unicode);
+ __putname(unicode);
return err;
}
@@ -598,7 +610,7 @@ parse_record:
if (isvfat) {
bufuname[j] = 0x0000;
i = utf8 ? utf8_wcstombs(bufname, bufuname, sizeof(bufname))
- : uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+ : uni16_to_x8(bufname, bufuname, sizeof(bufname), uni_xlate, nls_io);
}
fill_name = bufname;
@@ -607,10 +619,10 @@ parse_record:
/* convert the unicode long name. 261 is maximum size
* of unicode buffer. (13 * slots + nul) */
void *longname = unicode + 261;
- int buf_size = PAGE_SIZE - (261 * sizeof(unicode[0]));
+ int buf_size = PATH_MAX - (261 * sizeof(unicode[0]));
int long_len = utf8
? utf8_wcstombs(longname, unicode, buf_size)
- : uni16_to_x8(longname, unicode, uni_xlate, nls_io);
+ : uni16_to_x8(longname, unicode, buf_size, uni_xlate, nls_io);
if (!both) {
fill_name = longname;
@@ -640,7 +652,7 @@ EODir:
FillFailed:
brelse(bh);
if (unicode)
- free_page((unsigned long)unicode);
+ __putname(unicode);
out:
unlock_kernel();
return ret;
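The uni16_to_x8() change above replaces a fixed 256-byte stack buffer with a __getname() buffer and a caller-supplied length, decrementing the remaining space as output is emitted. A self-contained sketch of the same bounded-conversion pattern — simplified to ASCII, so nls->uni2char() is replaced by a trivial mapper:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_CHARLEN 6   /* worst case per character, like NLS_MAX_CHARSET_SIZE */

    /* Convert a NUL-terminated UTF-16 string into 'out', never writing
     * more than 'len' bytes (including the terminating NUL).  Returns the
     * number of bytes produced; sets *truncated if the input did not fit. */
    static int uni16_to_ascii(char *out, const uint16_t *uni, int len,
                              int *truncated)
    {
            char *op = out;

            while (*uni && (len - MAX_CHARLEN) > 0) {
                    uint16_t c = *uni++;

                    if (c < 0x80) {         /* stand-in for nls->uni2char() */
                            *op++ = (char)c;
                            len--;
                    } else {                /* unrepresentable: emit '?' */
                            *op++ = '?';
                            len--;
                    }
            }
            *truncated = (*uni != 0);       /* input left over -> truncated */
            *op = '\0';
            return op - out;
    }

    int main(void)
    {
            uint16_t name[] = { 'h', 'i', 0x263a, 0 };
            char buf[8];
            int trunc;

            int n = uni16_to_ascii(buf, name, sizeof(buf), &trunc);
            printf("%d bytes: \"%s\"%s\n", n, buf, trunc ? " (truncated)" : "");
            return 0;
    }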
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 5fb366992b73..13ab763cc510 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -450,7 +450,8 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2)); /* fixed limit */
lock_fat(sbi);
- if (sbi->free_clusters != -1 && sbi->free_clusters < nr_cluster) {
+ if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
+ sbi->free_clusters < nr_cluster) {
unlock_fat(sbi);
return -ENOSPC;
}
@@ -504,6 +505,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
/* Couldn't allocate the free entries */
sbi->free_clusters = 0;
+ sbi->free_clus_valid = 1;
sb->s_dirt = 1;
err = -ENOSPC;
@@ -583,8 +585,6 @@ error:
brelse(bhs[i]);
unlock_fat(sbi);
- fat_clusters_flush(sb);
-
return err;
}
@@ -615,7 +615,7 @@ int fat_count_free_clusters(struct super_block *sb)
int err = 0, free;
lock_fat(sbi);
- if (sbi->free_clusters != -1)
+ if (sbi->free_clusters != -1 && sbi->free_clus_valid)
goto out;
reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
@@ -643,6 +643,7 @@ int fat_count_free_clusters(struct super_block *sb)
} while (fat_ent_next(sbi, &fatent));
}
sbi->free_clusters = free;
+ sbi->free_clus_valid = 1;
sb->s_dirt = 1;
fatent_brelse(&fatent);
out:
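The fatent.c hunks guard sbi->free_clusters with a new free_clus_valid flag: the FSINFO hint is only trusted when the user mounted with "usefree"; otherwise the count is recomputed on first use and the flag set once the cached value is authoritative. A tiny sketch of that lazy-validation pattern — field names mimic the patch, and the FAT scan is faked:

    #include <stdio.h>

    struct fat_sb {
            long free_clusters;     /* -1 or a cached count */
            int  free_clus_valid;   /* cached count is authoritative */
    };

    /* Stand-in for fat_count_free_clusters(): pretend we scanned the FAT. */
    static long scan_fat(void) { return 1234; }

    static long get_free_clusters(struct fat_sb *sbi)
    {
            if (sbi->free_clusters == -1 || !sbi->free_clus_valid) {
                    sbi->free_clusters = scan_fat();
                    sbi->free_clus_valid = 1;   /* now trustworthy */
            }
            return sbi->free_clusters;
    }

    int main(void)
    {
            /* FSINFO said 999, but we did not mount with "usefree",
             * so the hint is present yet not marked valid. */
            struct fat_sb sbi = { .free_clusters = 999, .free_clus_valid = 0 };

            printf("free: %ld\n", get_free_clusters(&sbi)); /* rescans: 1234 */
            printf("free: %ld\n", get_free_clusters(&sbi)); /* cached: 1234 */
            return 0;
    }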
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 2a3bed967041..d604bb132422 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -157,104 +157,6 @@ out:
return err;
}
-static int check_mode(const struct msdos_sb_info *sbi, mode_t mode)
-{
- mode_t req = mode & ~S_IFMT;
-
- /*
- * Of the r and x bits, all (subject to umask) must be present. Of the
- * w bits, either all (subject to umask) or none must be present.
- */
-
- if (S_ISREG(mode)) {
- req &= ~sbi->options.fs_fmask;
-
- if ((req & (S_IRUGO | S_IXUGO)) !=
- ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_fmask))
- return -EPERM;
-
- if ((req & S_IWUGO) != 0 &&
- (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_fmask))
- return -EPERM;
- } else if (S_ISDIR(mode)) {
- req &= ~sbi->options.fs_dmask;
-
- if ((req & (S_IRUGO | S_IXUGO)) !=
- ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_dmask))
- return -EPERM;
-
- if ((req & S_IWUGO) != 0 &&
- (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_dmask))
- return -EPERM;
- } else {
- return -EPERM;
- }
-
- return 0;
-}
-
-int fat_notify_change(struct dentry *dentry, struct iattr *attr)
-{
- struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
- struct inode *inode = dentry->d_inode;
- int mask, error = 0;
-
- lock_kernel();
-
- /*
- * Expand the file. Since inode_setattr() updates ->i_size
- * before calling the ->truncate(), but FAT needs to fill the
- * hole before it.
- */
- if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size > inode->i_size) {
- error = fat_cont_expand(inode, attr->ia_size);
- if (error || attr->ia_valid == ATTR_SIZE)
- goto out;
- attr->ia_valid &= ~ATTR_SIZE;
- }
- }
-
- error = inode_change_ok(inode, attr);
- if (error) {
- if (sbi->options.quiet)
- error = 0;
- goto out;
- }
- if (((attr->ia_valid & ATTR_UID) &&
- (attr->ia_uid != sbi->options.fs_uid)) ||
- ((attr->ia_valid & ATTR_GID) &&
- (attr->ia_gid != sbi->options.fs_gid)))
- error = -EPERM;
-
- if (error) {
- if (sbi->options.quiet)
- error = 0;
- goto out;
- }
-
- if (attr->ia_valid & ATTR_MODE) {
- error = check_mode(sbi, attr->ia_mode);
- if (error != 0 && !sbi->options.quiet)
- goto out;
- }
-
- error = inode_setattr(inode, attr);
- if (error)
- goto out;
-
- if (S_ISDIR(inode->i_mode))
- mask = sbi->options.fs_dmask;
- else
- mask = sbi->options.fs_fmask;
- inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
-out:
- unlock_kernel();
- return error;
-}
-
-EXPORT_SYMBOL_GPL(fat_notify_change);
-
/* Free all clusters after the skip'th cluster. */
static int fat_free(struct inode *inode, int skip)
{
@@ -355,8 +257,112 @@ int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
}
EXPORT_SYMBOL_GPL(fat_getattr);
+static int fat_check_mode(const struct msdos_sb_info *sbi, struct inode *inode,
+ mode_t mode)
+{
+ mode_t mask, req = mode & ~S_IFMT;
+
+ if (S_ISREG(mode))
+ mask = sbi->options.fs_fmask;
+ else
+ mask = sbi->options.fs_dmask;
+
+ /*
+ * Of the r and x bits, all (subject to umask) must be present. Of the
+ * w bits, either all (subject to umask) or none must be present.
+ */
+ req &= ~mask;
+ if ((req & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO)))
+ return -EPERM;
+ if ((req & S_IWUGO) && ((req & S_IWUGO) != (S_IWUGO & ~mask)))
+ return -EPERM;
+
+ return 0;
+}
+
+static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
+{
+ mode_t allow_utime = sbi->options.allow_utime;
+
+ if (current->fsuid != inode->i_uid) {
+ if (in_group_p(inode->i_gid))
+ allow_utime >>= 3;
+ if (allow_utime & MAY_WRITE)
+ return 1;
+ }
+
+ /* use a default check */
+ return 0;
+}
+
+int fat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ int mask, error = 0;
+ unsigned int ia_valid;
+
+ lock_kernel();
+
+ /*
+ * Expand the file. Since inode_setattr() updates ->i_size
+ * before calling the ->truncate(), but FAT needs to fill the
+ * hole before it.
+ */
+ if (attr->ia_valid & ATTR_SIZE) {
+ if (attr->ia_size > inode->i_size) {
+ error = fat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ goto out;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+ }
+
+ /* Check for setting the inode time. */
+ ia_valid = attr->ia_valid;
+ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
+ if (fat_allow_set_time(sbi, inode))
+ attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);
+ }
+
+ error = inode_change_ok(inode, attr);
+ attr->ia_valid = ia_valid;
+ if (error) {
+ if (sbi->options.quiet)
+ error = 0;
+ goto out;
+ }
+ if (((attr->ia_valid & ATTR_UID) &&
+ (attr->ia_uid != sbi->options.fs_uid)) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ (attr->ia_gid != sbi->options.fs_gid)) ||
+ ((attr->ia_valid & ATTR_MODE) &&
+ fat_check_mode(sbi, inode, attr->ia_mode) < 0))
+ error = -EPERM;
+
+ if (error) {
+ if (sbi->options.quiet)
+ error = 0;
+ goto out;
+ }
+
+ error = inode_setattr(inode, attr);
+ if (error)
+ goto out;
+
+ if (S_ISDIR(inode->i_mode))
+ mask = sbi->options.fs_dmask;
+ else
+ mask = sbi->options.fs_fmask;
+ inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
+out:
+ unlock_kernel();
+ return error;
+}
+EXPORT_SYMBOL_GPL(fat_setattr);
+
const struct inode_operations fat_file_inode_operations = {
.truncate = fat_truncate,
- .setattr = fat_notify_change,
+ .setattr = fat_setattr,
.getattr = fat_getattr,
};
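fat_allow_set_time() above reuses the usual rwx bit layout: allow_utime holds write bits for group and other, and the group bits are shifted into position when the caller is not the owner. A user-space sketch of the same bit test — constants are inlined, and current->fsuid / in_group_p() are replaced by plain parameters:

    #include <stdio.h>

    #define MAY_WRITE 0002   /* "other" write bit, as in a mode word */

    /* allow_utime uses the same layout as a mode word's group/other
     * bits.  The owner always falls through to the default permission
     * check; non-owners are allowed if the matching write bit is set. */
    static int allow_set_time(unsigned allow_utime, int is_owner, int in_group)
    {
            if (!is_owner) {
                    if (in_group)
                            allow_utime >>= 3;      /* select the group bits */
                    if (allow_utime & MAY_WRITE)
                            return 1;
            }
            return 0;   /* fall back to the default check */
    }

    int main(void)
    {
            unsigned allow_utime = 0020;   /* group may set times */

            printf("group member: %d\n", allow_set_time(allow_utime, 0, 1)); /* 1 */
            printf("stranger:     %d\n", allow_set_time(allow_utime, 0, 0)); /* 0 */
            return 0;
    }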
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 53f3cf62b7c1..5f522a55b596 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -433,11 +433,8 @@ EXPORT_SYMBOL_GPL(fat_build_inode);
static void fat_delete_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
-
- if (!is_bad_inode(inode)) {
- inode->i_size = 0;
- fat_truncate(inode);
- }
+ inode->i_size = 0;
+ fat_truncate(inode);
clear_inode(inode);
}
@@ -445,8 +442,6 @@ static void fat_clear_inode(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
- if (is_bad_inode(inode))
- return;
lock_kernel();
spin_lock(&sbi->inode_hash_lock);
fat_cache_inval_inode(inode);
@@ -542,7 +537,7 @@ static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
/* If the count of free cluster is still unknown, counts it here. */
- if (sbi->free_clusters == -1) {
+ if (sbi->free_clusters == -1 || !sbi->free_clus_valid) {
int err = fat_count_free_clusters(dentry->d_sb);
if (err)
return err;
@@ -790,6 +785,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, ",gid=%u", opts->fs_gid);
seq_printf(m, ",fmask=%04o", opts->fs_fmask);
seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+ if (opts->allow_utime)
+ seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
if (sbi->nls_disk)
seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
if (isvfat) {
@@ -845,9 +842,9 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
enum {
Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
- Opt_umask, Opt_dmask, Opt_fmask, Opt_codepage, Opt_usefree, Opt_nocase,
- Opt_quiet, Opt_showexec, Opt_debug, Opt_immutable,
- Opt_dots, Opt_nodots,
+ Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_codepage,
+ Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug,
+ Opt_immutable, Opt_dots, Opt_nodots,
Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
@@ -866,6 +863,7 @@ static match_table_t fat_tokens = {
{Opt_umask, "umask=%o"},
{Opt_dmask, "dmask=%o"},
{Opt_fmask, "fmask=%o"},
+ {Opt_allow_utime, "allow_utime=%o"},
{Opt_codepage, "codepage=%u"},
{Opt_usefree, "usefree"},
{Opt_nocase, "nocase"},
@@ -937,6 +935,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
opts->fs_uid = current->uid;
opts->fs_gid = current->gid;
opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->allow_utime = -1;
opts->codepage = fat_default_codepage;
opts->iocharset = fat_default_iocharset;
if (is_vfat)
@@ -1024,6 +1023,11 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
return 0;
opts->fs_fmask = option;
break;
+ case Opt_allow_utime:
+ if (match_octal(&args[0], &option))
+ return 0;
+ opts->allow_utime = option & (S_IWGRP | S_IWOTH);
+ break;
case Opt_codepage:
if (match_int(&args[0], &option))
return 0;
@@ -1106,6 +1110,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
" for FAT filesystems, filesystem will be case sensitive!\n");
}
+ /* If user doesn't specify allow_utime, it's initialized from dmask. */
+ if (opts->allow_utime == (unsigned short)-1)
+ opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
if (opts->unicode_xlate)
opts->utf8 = 0;
@@ -1208,7 +1215,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
*/
media = b->media;
- if (!FAT_VALID_MEDIA(media)) {
+ if (!fat_valid_media(media)) {
if (!silent)
printk(KERN_ERR "FAT: invalid media value (0x%02x)\n",
media);
@@ -1219,7 +1226,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
le16_to_cpu(get_unaligned((__le16 *)&b->sector_size));
if (!is_power_of_2(logical_sector_size)
|| (logical_sector_size < 512)
- || (PAGE_CACHE_SIZE < logical_sector_size)) {
+ || (logical_sector_size > 4096)) {
if (!silent)
printk(KERN_ERR "FAT: bogus logical sector size %u\n",
logical_sector_size);
@@ -1267,6 +1274,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
sbi->fat_length = le16_to_cpu(b->fat_length);
sbi->root_cluster = 0;
sbi->free_clusters = -1; /* Don't know yet */
+ sbi->free_clus_valid = 0;
sbi->prev_free = FAT_START_ENT;
if (!sbi->fat_length && b->fat32_length) {
@@ -1302,8 +1310,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
sbi->fsinfo_sector);
} else {
if (sbi->options.usefree)
- sbi->free_clusters =
- le32_to_cpu(fsinfo->free_clusters);
+ sbi->free_clus_valid = 1;
+ sbi->free_clusters = le32_to_cpu(fsinfo->free_clusters);
sbi->prev_free = le32_to_cpu(fsinfo->next_cluster);
}
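The inode.c hunks add the allow_utime= mount option itself; when it is left unset (-1), the default is derived from dmask so that whoever could write to a directory may also set timestamps. The derivation is one line of bit arithmetic, sketched here:

    #include <stdio.h>

    #define S_IWGRP 0020
    #define S_IWOTH 0002

    int main(void)
    {
            unsigned short dmask = 0002;          /* group writable, other not */
            unsigned short allow_utime = (unsigned short)-1;   /* "unset" */

            /* Same default as the patch: whatever write bits dmask does
             * NOT mask out become the allow_utime bits for group/other. */
            if (allow_utime == (unsigned short)-1)
                    allow_utime = ~dmask & (S_IWGRP | S_IWOTH);

            printf("allow_utime=%04o\n", allow_utime);   /* 0020 here */
            return 0;
    }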
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 90a04a6e3789..f55394e57cb2 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -438,7 +438,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
int error;
/*
- * Due to the order of unstuffing files and ->nopage(), we can be
+ * Due to the order of unstuffing files and ->fault(), we can be
* asked for a zero page in the case of a stuffed file being extended,
* so we need to supply one here. It doesn't happen often.
*/
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6846785fe904..9783723e8ffe 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -504,7 +504,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
- mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
+ mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index a38c7186c570..cd931ef1f000 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -407,22 +407,6 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug (3, "JBD: commit phase 2\n");
/*
- * First, drop modified flag: all accesses to the buffers
- * will be tracked for a new trasaction only -bzzz
- */
- spin_lock(&journal->j_list_lock);
- if (commit_transaction->t_buffers) {
- new_jh = jh = commit_transaction->t_buffers->b_tnext;
- do {
- J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
- new_jh->b_modified == 0);
- new_jh->b_modified = 0;
- new_jh = new_jh->b_tnext;
- } while (new_jh != jh);
- }
- spin_unlock(&journal->j_list_lock);
-
- /*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
@@ -488,6 +472,9 @@ void journal_commit_transaction(journal_t *journal)
*/
commit_transaction->t_state = T_COMMIT;
+ J_ASSERT(commit_transaction->t_nr_buffers <=
+ commit_transaction->t_outstanding_credits);
+
descriptor = NULL;
bufs = 0;
while (commit_transaction->t_buffers) {
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 0e081d5f32e8..b99c3b3654c4 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,7 +534,7 @@ int log_wait_commit(journal_t *journal, tid_t tid)
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
"%s: error: j_commit_request=%d, tid=%d\n",
- __FUNCTION__, journal->j_commit_request, tid);
+ __func__, journal->j_commit_request, tid);
}
spin_unlock(&journal->j_state_lock);
#endif
@@ -599,7 +599,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
printk(KERN_ALERT "%s: journal block not found "
"at offset %lu on %s\n",
- __FUNCTION__,
+ __func__,
blocknr,
bdevname(journal->j_dev, b));
err = -EIO;
@@ -728,7 +728,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
- __FUNCTION__);
+ __func__);
kfree(journal);
journal = NULL;
goto out;
@@ -782,7 +782,7 @@ journal_t * journal_init_inode (struct inode *inode)
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
- __FUNCTION__);
+ __func__);
kfree(journal);
return NULL;
}
@@ -791,7 +791,7 @@ journal_t * journal_init_inode (struct inode *inode)
/* If that failed, give up */
if (err) {
printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
- __FUNCTION__);
+ __func__);
kfree(journal);
return NULL;
}
@@ -877,7 +877,7 @@ int journal_create(journal_t *journal)
*/
printk(KERN_EMERG
"%s: creation of journal on external device!\n",
- __FUNCTION__);
+ __func__);
BUG();
}
@@ -1657,7 +1657,7 @@ static struct journal_head *journal_alloc_journal_head(void)
jbd_debug(1, "out of memory for journal_head\n");
if (time_after(jiffies, last_warning + 5*HZ)) {
printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
- __FUNCTION__);
+ __func__);
last_warning = jiffies;
}
while (ret == NULL) {
@@ -1794,13 +1794,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
if (jh->b_frozen_data) {
printk(KERN_WARNING "%s: freeing "
"b_frozen_data\n",
- __FUNCTION__);
+ __func__);
jbd_free(jh->b_frozen_data, bh->b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing "
"b_committed_data\n",
- __FUNCTION__);
+ __func__);
jbd_free(jh->b_committed_data, bh->b_size);
}
bh->b_private = NULL;
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d5f8eee7c88c..1bb43e987f4b 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -138,7 +138,7 @@ repeat:
oom:
if (!journal_oom_retry)
return -ENOMEM;
- jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+ jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
yield();
goto repeat;
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 2c9e8f5d13aa..67ff2024c23c 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -609,6 +609,12 @@ repeat:
goto done;
/*
+ * this is the first time this transaction is touching this buffer,
+ * reset the modified flag
+ */
+ jh->b_modified = 0;
+
+ /*
* If there is already a copy-out version of this buffer, then we don't
* need to make another one
*/
@@ -681,7 +687,7 @@ repeat:
if (!frozen_buffer) {
printk(KERN_EMERG
"%s: OOM for frozen_buffer\n",
- __FUNCTION__);
+ __func__);
JBUFFER_TRACE(jh, "oom!");
error = -ENOMEM;
jbd_lock_bh_state(bh);
@@ -820,9 +826,16 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == NULL) {
jh->b_transaction = transaction;
+
+ /* first access by this transaction */
+ jh->b_modified = 0;
+
JBUFFER_TRACE(jh, "file as BJ_Reserved");
__journal_file_buffer(jh, transaction, BJ_Reserved);
} else if (jh->b_transaction == journal->j_committing_transaction) {
+ /* first access by this transaction */
+ jh->b_modified = 0;
+
JBUFFER_TRACE(jh, "set next transaction");
jh->b_next_transaction = transaction;
}
@@ -891,7 +904,7 @@ repeat:
committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
- __FUNCTION__);
+ __func__);
err = -ENOMEM;
goto out;
}
@@ -1222,6 +1235,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int drop_reserve = 0;
int err = 0;
+ int was_modified = 0;
BUFFER_TRACE(bh, "entry");
@@ -1240,6 +1254,9 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
goto not_jbd;
}
+ /* keep track of whether or not this transaction modified us */
+ was_modified = jh->b_modified;
+
/*
* The buffer's going from the transaction, we must drop
* all references -bzzz
@@ -1257,7 +1274,12 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
- drop_reserve = 1;
+ /*
+ * we only want to drop a reference if this transaction
+ * modified the buffer
+ */
+ if (was_modified)
+ drop_reserve = 1;
/*
* We are no longer going to journal this buffer.
@@ -1297,7 +1319,13 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction == transaction);
jh->b_next_transaction = NULL;
- drop_reserve = 1;
+
+ /*
+ * only drop a reference if this transaction modified
+ * the buffer
+ */
+ if (was_modified)
+ drop_reserve = 1;
}
}
@@ -2069,7 +2097,7 @@ void __journal_refile_buffer(struct journal_head *jh)
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
__journal_file_buffer(jh, jh->b_transaction,
- was_dirty ? BJ_Metadata : BJ_Reserved);
+ jh->b_modified ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)
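The jbd hunks move the b_modified reset from commit time to the moment a transaction first touches a buffer, and journal_forget() then releases a credit only if this transaction actually modified the buffer. A compact simulation of that accounting rule — plain structs, no journaling:

    #include <stdio.h>

    struct jh  { int b_modified; };
    struct txn { int credits; };

    /* First access by a transaction: reset the per-transaction flag
     * (what the patch adds to do_get_write_access() and
     * journal_get_create_access()). */
    static void first_access(struct jh *jh) { jh->b_modified = 0; }

    /* Dirtying the buffer consumes one credit and marks it modified. */
    static void dirty(struct txn *t, struct jh *jh)
    {
            if (!jh->b_modified) {
                    t->credits--;
                    jh->b_modified = 1;
            }
    }

    /* journal_forget(): only give the credit back if *this* transaction
     * modified the buffer -- otherwise we never took one. */
    static void forget(struct txn *t, struct jh *jh)
    {
            int was_modified = jh->b_modified;
            if (was_modified)
                    t->credits++;
    }

    int main(void)
    {
            struct txn t = { .credits = 10 };
            struct jh a = {0}, b = {0};

            first_access(&a); dirty(&t, &a); forget(&t, &a); /* credit returned */
            first_access(&b); forget(&t, &b);                /* nothing to return */
            printf("credits=%d\n", t.credits);               /* still 10 */
            return 0;
    }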
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 30f7d0ae2215..2d4358c59f68 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -653,7 +653,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
.mkdir = msdos_mkdir,
.rmdir = msdos_rmdir,
.rename = msdos_rename,
- .setattr = fat_notify_change,
+ .setattr = fat_setattr,
.getattr = fat_getattr,
};
diff --git a/fs/namespace.c b/fs/namespace.c
index f48f98110c30..fe376805cf5f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,7 +14,6 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
@@ -1084,7 +1083,6 @@ static int do_umount(struct vfsmount *mnt, int flags)
down_write(&sb->s_umount);
if (!(sb->s_flags & MS_RDONLY)) {
lock_kernel();
- DQUOT_OFF(sb);
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
unlock_kernel();
}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index fbbb9f7afa1a..2e5ab1204dec 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -107,12 +107,6 @@ static const struct super_operations ncp_sops =
.show_options = ncp_show_options,
};
-extern struct dentry_operations ncp_root_dentry_operations;
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-extern int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
/*
* Fill in the ncpfs-specific information in the inode.
*/
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index ad8f167e54bc..3a97c95e1ca2 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -389,11 +389,11 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
struct dentry* dentry = inode->i_sb->s_root;
if (dentry) {
- struct inode* inode = dentry->d_inode;
+ struct inode* s_inode = dentry->d_inode;
- if (inode) {
- sr.volNumber = NCP_FINFO(inode)->volNumber;
- sr.dirEntNum = NCP_FINFO(inode)->dirEntNum;
+ if (s_inode) {
+ sr.volNumber = NCP_FINFO(s_inode)->volNumber;
+ sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum;
sr.namespace = server->name_space[sr.volNumber];
} else
DPRINTK("ncpfs: s_root->d_inode==NULL\n");
@@ -439,12 +439,12 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
dentry = inode->i_sb->s_root;
server->root_setuped = 1;
if (dentry) {
- struct inode* inode = dentry->d_inode;
+ struct inode* s_inode = dentry->d_inode;
if (inode) {
- NCP_FINFO(inode)->volNumber = vnum;
- NCP_FINFO(inode)->dirEntNum = de;
- NCP_FINFO(inode)->DosDirNum = dosde;
+ NCP_FINFO(s_inode)->volNumber = vnum;
+ NCP_FINFO(s_inode)->dirEntNum = de;
+ NCP_FINFO(s_inode)->DosDirNum = dosde;
} else
DPRINTK("ncpfs: s_root->d_inode==NULL\n");
} else
@@ -519,7 +519,6 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
}
{
struct ncp_lock_ioctl rqdata;
- int result;
if (copy_from_user(&rqdata, argp, sizeof(rqdata)))
return -EFAULT;
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
index 749a18d33599..7c0b5c21e6cf 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -55,7 +55,7 @@ static void nwsign(char *r_data1, char *r_data2, char *outdata) {
unsigned int w0,w1,w2,w3;
static int rbit[4]={0, 2, 1, 3};
#ifdef __i386__
- unsigned int *data2=(int *)r_data2;
+ unsigned int *data2=(unsigned int *)r_data2;
#else
unsigned int data2[16];
for (i=0;i<16;i++)
diff --git a/fs/open.c b/fs/open.c
index b70e7666bb2c..7af1f05d5978 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -837,7 +837,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
if (f->f_flags & O_DIRECT) {
if (!f->f_mapping->a_ops ||
((!f->f_mapping->a_ops->direct_IO) &&
- (!f->f_mapping->a_ops->get_xip_page))) {
+ (!f->f_mapping->a_ops->get_xip_mem))) {
fput(f);
f = ERR_PTR(-EINVAL);
}
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 5567ec0d03a3..796511886f28 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -18,7 +18,7 @@
*
* Re-organised Feb 1998 Russell King
*/
-
+#include <linux/msdos_fs.h>
#include "check.h"
#include "msdos.h"
@@ -419,6 +419,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
Sector sect;
unsigned char *data;
struct partition *p;
+ struct fat_boot_sector *fb;
int slot;
data = read_dev_sector(bdev, 0, &sect);
@@ -444,8 +445,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
p = (struct partition *) (data + 0x1be);
for (slot = 1; slot <= 4; slot++, p++) {
if (p->boot_ind != 0 && p->boot_ind != 0x80) {
- put_dev_sector(sect);
- return 0;
+ /*
+ * Even without a valid boot indicator value
+ * it's still possible this is a valid FAT filesystem
+ * without a partition table.
+ */
+ fb = (struct fat_boot_sector *) data;
+ if (slot == 1 && fb->reserved && fb->fats
+ && fat_valid_media(fb->media)) {
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+ } else {
+ put_dev_sector(sect);
+ return 0;
+ }
}
}
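The msdos partition parser previously rejected the whole device as soon as one boot indicator byte was invalid; the patch first checks whether sector 0 instead looks like a bare FAT boot sector with no partition table at all. A sketch of that heuristic over a raw 512-byte sector — the offsets follow the FAT boot sector layout, and fat_valid_media() is inlined:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* A FAT media byte is 0xf8..0xff or 0xf0. */
    static int fat_valid_media(uint8_t media)
    {
            return media >= 0xf8 || media == 0xf0;
    }

    /* Rough equivalent of the new fallback: if the first partition slot
     * has a bad boot indicator, see whether the sector is a FAT boot
     * sector (non-zero reserved-sector count and FAT count, valid media). */
    static int looks_like_bare_fat(const uint8_t *sector)
    {
            uint16_t reserved = sector[14] | (sector[15] << 8);
            uint8_t  fats     = sector[16];
            uint8_t  media    = sector[21];

            return reserved && fats && fat_valid_media(media);
    }

    int main(void)
    {
            uint8_t sector[512];

            memset(sector, 0, sizeof(sector));
            sector[14] = 1;      /* reserved sectors = 1 */
            sector[16] = 2;      /* two FATs */
            sector[21] = 0xf8;   /* fixed-disk media byte */

            printf("bare FAT? %s\n", looks_like_bare_fat(sector) ? "yes" : "no");
            return 0;
    }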
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 2d563979cb02..441a32f0e5f2 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -456,6 +456,20 @@ static const struct file_operations proc_slabstats_operations = {
#endif
#endif
+#ifdef CONFIG_MMU
+static int vmalloc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &vmalloc_op);
+}
+
+static const struct file_operations proc_vmalloc_operations = {
+ .open = vmalloc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
static int show_stat(struct seq_file *p, void *v)
{
int i;
@@ -869,6 +883,9 @@ void __init proc_misc_init(void)
create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
#endif
#endif
+#ifdef CONFIG_MMU
+ proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
+#endif
create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops);
create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9dfb5ff24209..7415eeb7cc3a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -338,8 +338,7 @@ const struct file_operations proc_maps_operations = {
#define PSS_SHIFT 12
#ifdef CONFIG_PROC_PAGE_MONITOR
-struct mem_size_stats
-{
+struct mem_size_stats {
struct vm_area_struct *vma;
unsigned long resident;
unsigned long shared_clean;
@@ -347,6 +346,7 @@ struct mem_size_stats
unsigned long private_clean;
unsigned long private_dirty;
unsigned long referenced;
+ unsigned long swap;
u64 pss;
};
@@ -363,6 +363,12 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
+
+ if (is_swap_pte(ptent)) {
+ mss->swap += PAGE_SIZE;
+ continue;
+ }
+
if (!pte_present(ptent))
continue;
@@ -421,7 +427,8 @@ static int show_smap(struct seq_file *m, void *v)
"Shared_Dirty: %8lu kB\n"
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
- "Referenced: %8lu kB\n",
+ "Referenced: %8lu kB\n"
+ "Swap: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -429,7 +436,8 @@ static int show_smap(struct seq_file *m, void *v)
mss.shared_dirty >> 10,
mss.private_clean >> 10,
mss.private_dirty >> 10,
- mss.referenced >> 10);
+ mss.referenced >> 10,
+ mss.swap >> 10);
return ret;
}
@@ -579,7 +587,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
return err;
}
-u64 swap_pte_to_pagemap_entry(pte_t pte)
+static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
swp_entry_t e = pte_to_swp_entry(pte);
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
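swap_pte_to_pagemap_entry() packs a swap entry's type and offset into a single pagemap word: the low bits carry the swap type and the offset sits above them. A sketch of the same pack/unpack pair; the shift width of 5 matches MAX_SWAPFILES_SHIFT in kernels of this era, but treat it as an illustrative assumption:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SWAPFILES_SHIFT 5   /* low 5 bits hold the swap type */

    static uint64_t pack(unsigned type, uint64_t offset)
    {
            return (uint64_t)type | (offset << MAX_SWAPFILES_SHIFT);
    }

    static void unpack(uint64_t e, unsigned *type, uint64_t *offset)
    {
            *type   = e & ((1u << MAX_SWAPFILES_SHIFT) - 1);
            *offset = e >> MAX_SWAPFILES_SHIFT;
    }

    int main(void)
    {
            unsigned type; uint64_t off;
            uint64_t e = pack(1, 4096);     /* swap device 1, slot 4096 */

            unpack(e, &type, &off);
            printf("type=%u offset=%llu\n", type, (unsigned long long)off);
            return 0;
    }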
diff --git a/fs/quota.c b/fs/quota.c
index 84f28dd72116..db1cc9f3c7aa 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -69,7 +69,6 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
switch (cmd) {
case Q_GETFMT:
case Q_GETINFO:
- case Q_QUOTAOFF:
case Q_SETINFO:
case Q_SETQUOTA:
case Q_GETQUOTA:
@@ -229,12 +228,12 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
if (IS_ERR(pathname = getname(addr)))
return PTR_ERR(pathname);
- ret = sb->s_qcop->quota_on(sb, type, id, pathname);
+ ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
return ret;
}
case Q_QUOTAOFF:
- return sb->s_qcop->quota_off(sb, type);
+ return sb->s_qcop->quota_off(sb, type, 0);
case Q_GETFMT: {
__u32 fmt;
diff --git a/fs/quota_v1.c b/fs/quota_v1.c
index f3841f233069..a6cf9269105c 100644
--- a/fs/quota_v1.c
+++ b/fs/quota_v1.c
@@ -139,6 +139,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
goto out;
}
ret = 0;
+ /* limits are stored as unsigned 32-bit data */
+ dqopt->info[type].dqi_maxblimit = 0xffffffff;
+ dqopt->info[type].dqi_maxilimit = 0xffffffff;
dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index c519a583e681..23b647f25d08 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -59,6 +59,9 @@ static int v2_read_file_info(struct super_block *sb, int type)
sb->s_id);
return -1;
}
+ /* limits are stored as unsigned 32-bit data */
+ info->dqi_maxblimit = 0xffffffff;
+ info->dqi_maxilimit = 0xffffffff;
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index f491ceb5af02..4646caa60455 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -479,7 +479,7 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
if (ei->i_prealloc_count < 0)
reiserfs_warning(th->t_super,
"zam-4001:%s: inode has negative prealloc blocks count.",
- __FUNCTION__);
+ __func__);
#endif
while (ei->i_prealloc_count > 0) {
reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
@@ -517,7 +517,7 @@ void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
if (!ei->i_prealloc_count) {
reiserfs_warning(th->t_super,
"zam-4001:%s: inode is in prealloc list but has no preallocated blocks.",
- __FUNCTION__);
+ __func__);
}
#endif
__discard_prealloc(th, ei);
@@ -632,7 +632,7 @@ int reiserfs_parse_alloc_options(struct super_block *s, char *options)
}
reiserfs_warning(s, "zam-4001: %s : unknown option - %s",
- __FUNCTION__, this_char);
+ __func__, this_char);
return 1;
}
@@ -1254,7 +1254,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
bh = sb_bread(sb, block);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
- "reading failed", __FUNCTION__, block);
+ "reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
PROC_INFO_INC(sb, scan_bitmap.wait);
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 7ee4208793b6..2f87f5b14630 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -1464,29 +1464,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
} else
/* item falls wholly into S_new[i] */
{
- int ret_val;
+ int leaf_mi;
struct item_head *pasted;
#ifdef CONFIG_REISERFS_CHECK
- struct item_head *ih =
+ struct item_head *ih_check =
B_N_PITEM_HEAD(tbS0, item_pos);
- if (!is_direntry_le_ih(ih)
- && (pos_in_item != ih_item_len(ih)
+ if (!is_direntry_le_ih(ih_check)
+ && (pos_in_item != ih_item_len(ih_check)
|| tb->insert_size[0] <= 0))
reiserfs_panic(tb->tb_sb,
"PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
#endif /* CONFIG_REISERFS_CHECK */
- ret_val =
+ leaf_mi =
leaf_move_items(LEAF_FROM_S_TO_SNEW,
tb, snum[i],
sbytes[i],
S_new[i]);
- RFALSE(ret_val,
+ RFALSE(leaf_mi,
"PAP-12240: unexpected value returned by leaf_move_items (%d)",
- ret_val);
+ leaf_mi);
/* paste into item */
bi.tb = tb;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 74363a7aacbc..830332021ed4 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -12,8 +12,6 @@
#include <linux/smp_lock.h>
#include <linux/compat.h>
-static int reiserfs_unpack(struct inode *inode, struct file *filp);
-
/*
** reiserfs_ioctl - handler for ioctl for inode
** supported commands:
@@ -159,7 +157,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
** Function try to convert tail from direct item into indirect.
** It set up nopack attribute in the REISERFS_I(inode)->nopack
*/
-static int reiserfs_unpack(struct inode *inode, struct file *filp)
+int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
int index;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 060eb3f598e7..da86042b3e03 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1187,7 +1187,7 @@ static int flush_commit_list(struct super_block *s,
if (retval)
reiserfs_abort(s, retval, "Journal write error in %s",
- __FUNCTION__);
+ __func__);
put_fs_excl();
return retval;
}
@@ -1534,7 +1534,7 @@ static int flush_journal_list(struct super_block *s,
reiserfs_warning(s,
"clm-2082: Unable to flush buffer %llu in %s",
(unsigned long long)saved_bh->
- b_blocknr, __FUNCTION__);
+ b_blocknr, __func__);
}
free_cnode:
last = cn;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s,
if (err)
reiserfs_abort(s, -EIO,
"Write error while pushing transaction to disk in %s",
- __FUNCTION__);
+ __func__);
flush_older_and_return:
/* before we can update the journal header block, we _must_ flush all
@@ -1616,7 +1616,7 @@ static int flush_journal_list(struct super_block *s,
if (err)
reiserfs_abort(s, -EIO,
"Write error while updating journal header in %s",
- __FUNCTION__);
+ __func__);
}
remove_all_from_journal_list(s, jl, 0);
list_del_init(&jl->j_list);
@@ -4316,5 +4316,5 @@ static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
void reiserfs_journal_abort(struct super_block *sb, int errno)
{
- return __reiserfs_journal_abort_soft(sb, errno);
+ __reiserfs_journal_abort_soft(sb, errno);
}
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 8867533cb727..c1add28dd45e 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -301,7 +301,7 @@ static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
path_to_entry, de);
if (retval == IO_ERROR) {
reiserfs_warning(dir->i_sb, "zam-7001: io error in %s",
- __FUNCTION__);
+ __func__);
return IO_ERROR;
}
@@ -496,7 +496,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
reiserfs_warning(dir->i_sb,
"zam-7002:%s: \"reiserfs_find_entry\" "
"has returned unexpected value (%d)",
- __FUNCTION__, retval);
+ __func__, retval);
}
return -EEXIST;
@@ -907,7 +907,7 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
if (inode->i_nlink != 2 && inode->i_nlink != 1)
reiserfs_warning(inode->i_sb, "%s: empty directory has nlink "
- "!= 2 (%d)", __FUNCTION__, inode->i_nlink);
+ "!= 2 (%d)", __func__, inode->i_nlink);
clear_nlink(inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
@@ -984,7 +984,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
if (!inode->i_nlink) {
reiserfs_warning(inode->i_sb, "%s: deleting nonexistent file "
- "(%s:%lu), %d", __FUNCTION__,
+ "(%s:%lu), %d", __func__,
reiserfs_bdevname(inode->i_sb), inode->i_ino,
inode->i_nlink);
inode->i_nlink = 1;
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index 65feba4deb69..ea0cf8c28a99 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -61,7 +61,7 @@ __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
/* comment needed -Hans */
unused_objectid = le32_to_cpu(map[1]);
if (unused_objectid == U32_MAX) {
- reiserfs_warning(s, "%s: no more object ids", __FUNCTION__);
+ reiserfs_warning(s, "%s: no more object ids", __func__);
reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
return 0;
}
@@ -114,7 +114,7 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
if (objectid_to_release == le32_to_cpu(map[i])) {
/* This incrementation unallocates the objectid. */
//map[i]++;
- map[i] = cpu_to_le32(le32_to_cpu(map[i]) + 1);
+ le32_add_cpu(&map[i], 1);
/* Did we unallocate the last member of an odd sequence, and can shrink oids? */
if (map[i] == map[i + 1]) {
@@ -138,8 +138,7 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
/* size of objectid map is not changed */
if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
//objectid_map[i+1]--;
- map[i + 1] =
- cpu_to_le32(le32_to_cpu(map[i + 1]) - 1);
+ le32_add_cpu(&map[i + 1], -1);
return;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index d2db2417b2bd..abbc64dcc8d4 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1419,8 +1419,7 @@ int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
inode_generation =
&REISERFS_SB(th->t_super)->s_rs->s_inode_generation;
- *inode_generation =
- cpu_to_le32(le32_to_cpu(*inode_generation) + 1);
+ le32_add_cpu(inode_generation, 1);
}
/* USE_INODE_GENERATION_COUNTER */
#endif
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 393cc22c1717..ed424d708e69 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -304,7 +304,7 @@ static int finish_unfinished(struct super_block *s)
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(s)->files[i])
- vfs_quota_off_mount(s, i);
+ vfs_quota_off(s, i, 0);
}
if (ms_active_set)
/* Restore the flag back */
@@ -634,7 +634,7 @@ static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
static int reiserfs_mark_dquot_dirty(struct dquot *);
static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static struct dquot_operations reiserfs_quota_operations = {
.initialize = reiserfs_dquot_initialize,
@@ -1890,8 +1890,14 @@ static int reiserfs_dquot_drop(struct inode *inode)
ret =
journal_begin(&th, inode->i_sb,
2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (ret)
+ if (ret) {
+ /*
+ * We call dquot_drop() anyway to at least release references
+ * to quota structures so that umount does not hang.
+ */
+ dquot_drop(inode);
goto out;
+ }
ret = dquot_drop(inode);
err =
journal_end(&th, inode->i_sb,
@@ -2015,13 +2021,17 @@ static int reiserfs_quota_on_mount(struct super_block *sb, int type)
* Standard function to be called on quota_on
*/
static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
- char *path)
+ char *path, int remount)
{
int err;
struct nameidata nd;
+ struct inode *inode;
if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
return -EINVAL;
+ /* No more checks needed? Path and format_id are bogus anyway... */
+ if (remount)
+ return vfs_quota_on(sb, type, format_id, path, 1);
err = path_lookup(path, LOOKUP_FOLLOW, &nd);
if (err)
return err;
@@ -2030,18 +2040,24 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
path_put(&nd.path);
return -EXDEV;
}
+ inode = nd.path.dentry->d_inode;
/* We must not pack tails for quota files on reiserfs for quota IO to work */
- if (!(REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask)) {
- reiserfs_warning(sb,
- "reiserfs: Quota file must have tail packing disabled.");
- path_put(&nd.path);
- return -EINVAL;
+ if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
+ err = reiserfs_unpack(inode, NULL);
+ if (err) {
+ reiserfs_warning(sb,
+ "reiserfs: Unpacking tail of quota file failed"
+ " (%d). Cannot turn on quotas.", err);
+ path_put(&nd.path);
+ return -EINVAL;
+ }
+ mark_inode_dirty(inode);
}
/* Not journalling quota? No more tests needed... */
if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] &&
!REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) {
path_put(&nd.path);
- return vfs_quota_on(sb, type, format_id, path);
+ return vfs_quota_on(sb, type, format_id, path, 0);
}
/* Quotafile not of fs root? */
if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
@@ -2049,7 +2065,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
"reiserfs: Quota file not on filesystem root. "
"Journalled quota will not work.");
path_put(&nd.path);
- return vfs_quota_on(sb, type, format_id, path);
+ return vfs_quota_on(sb, type, format_id, path, 0);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/super.c b/fs/super.c
index 4798350b2bc9..a5a4aca7e22f 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -179,7 +179,7 @@ void deactivate_super(struct super_block *s)
if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
s->s_count -= S_BIAS-1;
spin_unlock(&sb_lock);
- DQUOT_OFF(s);
+ DQUOT_OFF(s, 0);
down_write(&s->s_umount);
fs->kill_sb(s);
put_filesystem(fs);
@@ -608,6 +608,7 @@ retry:
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
int retval;
+ int remount_rw;
#ifdef CONFIG_BLOCK
if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
@@ -625,8 +626,11 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
mark_files_ro(sb);
else if (!fs_may_remount_ro(sb))
return -EBUSY;
- DQUOT_OFF(sb);
+ retval = DQUOT_OFF(sb, 1);
+ if (retval < 0 && retval != -ENOSYS)
+ return -EBUSY;
}
+ remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
if (sb->s_op->remount_fs) {
lock_super(sb);
@@ -636,6 +640,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
return retval;
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+ if (remount_rw)
+ DQUOT_ON_REMOUNT(sb);
return 0;
}
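do_remount_sb() now distinguishes the ro->rw transition: it must be detected before sb->s_flags is rewritten, and quotas are re-enabled only after the filesystem's own remount hook has succeeded. A control-flow sketch — the DQUOT_* stand-ins just print:

    #include <stdio.h>

    #define MS_RDONLY 1

    static int  DQUOT_OFF(int remount)  { printf("quota off (remount=%d)\n", remount); return 0; }
    static void DQUOT_ON_REMOUNT(void)  { printf("quota back on after remount\n"); }

    static int remount_sb_sketch(unsigned *s_flags, unsigned new_flags)
    {
            int remount_rw;

            if ((new_flags & MS_RDONLY) && !(*s_flags & MS_RDONLY)) {
                    /* going read-only: quotas must be shut down first,
                     * and a failure now aborts the remount (-EBUSY) */
                    if (DQUOT_OFF(1) < 0)
                            return -1;
            }
            /* note the transition BEFORE s_flags is rewritten */
            remount_rw = !(new_flags & MS_RDONLY) && (*s_flags & MS_RDONLY);

            *s_flags = new_flags;           /* fs ->remount_fs() succeeded */

            if (remount_rw)
                    DQUOT_ON_REMOUNT();     /* re-enable suspended quotas */
            return 0;
    }

    int main(void)
    {
            unsigned flags = MS_RDONLY;
            remount_sb_sketch(&flags, 0);   /* ro -> rw: quotas re-enabled */
            return 0;
    }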
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index ade9a7e6a757..dbdfabbfd609 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -477,11 +477,10 @@ const struct file_operations sysfs_file_operations = {
.poll = sysfs_poll,
};
-
-int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
- int type)
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type, mode_t amode)
{
- umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
+ umode_t mode = (amode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
int rc;
@@ -502,6 +501,13 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
}
+int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
+ int type)
+{
+ return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
+}
+
+
/**
* sysfs_create_file - create an attribute file for an object.
* @kobj: object we're creating for.
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 477904915032..eeba38417b1d 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -23,35 +23,50 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
int i;
for (i = 0, attr = grp->attrs; *attr; i++, attr++)
- if (!grp->is_visible ||
- grp->is_visible(kobj, *attr, i))
- sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*attr)->name);
}
static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
- const struct attribute_group *grp)
+ const struct attribute_group *grp, int update)
{
struct attribute *const* attr;
int error = 0, i;
- for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++)
- if (!grp->is_visible ||
- grp->is_visible(kobj, *attr, i))
- error |=
- sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
+ for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
+ mode_t mode = 0;
+
+ /* In update mode we may be changing the permissions or
+ * visibility. Do this by first removing the file and then
+ * re-adding it (if still required). */
+ if (update)
+ sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ if (grp->is_visible) {
+ mode = grp->is_visible(kobj, *attr, i);
+ if (!mode)
+ continue;
+ }
+ error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR,
+ (*attr)->mode | mode);
+ if (unlikely(error))
+ break;
+ }
if (error)
remove_files(dir_sd, kobj, grp);
return error;
}
-int sysfs_create_group(struct kobject * kobj,
- const struct attribute_group * grp)
+static int internal_create_group(struct kobject *kobj, int update,
+ const struct attribute_group *grp)
{
struct sysfs_dirent *sd;
int error;
- BUG_ON(!kobj || !kobj->sd);
+ BUG_ON(!kobj || (!update && !kobj->sd));
+
+ /* Updates may happen before the object has been instantiated */
+ if (unlikely(update && !kobj->sd))
+ return -EINVAL;
if (grp->name) {
error = sysfs_create_subdir(kobj, grp->name, &sd);
@@ -60,7 +75,7 @@ int sysfs_create_group(struct kobject * kobj,
} else
sd = kobj->sd;
sysfs_get(sd);
- error = create_files(sd, kobj, grp);
+ error = create_files(sd, kobj, grp, update);
if (error) {
if (grp->name)
sysfs_remove_subdir(sd);
@@ -69,6 +84,47 @@ int sysfs_create_group(struct kobject * kobj,
return error;
}
+/**
+ * sysfs_create_group - given a directory kobject, create an attribute group
+ * @kobj: The kobject to create the group on
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or an error code on failure.
+ */
+int sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return internal_create_group(kobj, 0, grp);
+}
+
+/**
+ * sysfs_update_group - given a directory kobject, update an attribute group
+ * @kobj: The kobject the group is on
+ * @grp: The attribute group to update
+ *
+ * This function updates an attribute group. Unlike
+ * sysfs_create_group(), it will explicitly not warn or error if any
+ * of the attribute files being created already exist. Furthermore,
+ * if the visibility of the files has changed through the is_visible()
+ * callback, it will update the permissions and add or remove the
+ * relevant files.
+ *
+ * The primary use for this function is to call it after making a change
+ * that affects group visibility.
+ *
+ * Returns 0 on success or an error code on failure.
+ */
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return internal_create_group(kobj, 1, grp);
+}
+
+
+
void sysfs_remove_group(struct kobject * kobj,
const struct attribute_group * grp)
{
@@ -95,4 +151,5 @@ void sysfs_remove_group(struct kobject * kobj,
EXPORT_SYMBOL_GPL(sysfs_create_group);
+EXPORT_SYMBOL_GPL(sysfs_update_group);
EXPORT_SYMBOL_GPL(sysfs_remove_group);
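
A hedged usage sketch for the new interface (the device, attribute, and flag names below are hypothetical): a driver whose attribute visibility depends on runtime state keeps a single attribute_group with an is_visible() callback and calls sysfs_update_group() whenever that state changes.

	static mode_t foo_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
	{
		struct foo_device *foo = to_foo_device(kobj);

		/* returning 0 hides the file; a nonzero mode exposes it */
		return foo->feature_enabled ? attr->mode : 0;
	}

	static struct attribute *foo_attrs[] = {
		&dev_attr_feature.attr,
		NULL,
	};

	static struct attribute_group foo_group = {
		.attrs		= foo_attrs,
		.is_visible	= foo_attr_is_visible,
	};

	/* after foo->feature_enabled changes: */
	sysfs_update_group(&foo->kobj, &foo_group);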
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ff17f8da9b43..ce4e15f8aaeb 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -154,6 +154,8 @@ extern const struct file_operations sysfs_file_operations;
int sysfs_add_file(struct sysfs_dirent *dir_sd,
const struct attribute *attr, int type);
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type, mode_t amode);
/*
* bin.c
*/
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index ba5537d4bc15..2b34c8ca6c83 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -890,7 +890,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
kernel_lb_addr eloc;
- uint32_t elen;
+ uint32_t bsize;
block = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
@@ -903,9 +903,9 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
eloc.logicalBlockNum = block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
- elen = inode->i_sb->s_blocksize;
- iinfo->i_lenExtents = elen;
- udf_add_aext(inode, &epos, eloc, elen, 0);
+ bsize = inode->i_sb->s_blocksize;
+ iinfo->i_lenExtents = bsize;
+ udf_add_aext(inode, &epos, eloc, bsize, 0);
brelse(epos.bh);
block = udf_get_pblock(inode->i_sb, block,
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 1e7598fb9787..0d9ada173739 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -277,7 +277,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
if (!page)/* it was truncated */
continue;
if (IS_ERR(page)) {/* or EIO */
- ufs_error(inode->i_sb, __FUNCTION__,
+ ufs_error(inode->i_sb, __func__,
"read of page %llu failed\n",
(unsigned long long)index);
continue;
@@ -308,7 +308,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
- ufs_error(inode->i_sb, __FUNCTION__,
+ ufs_error(inode->i_sb, __func__,
"read of block failed\n");
break;
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index ef563fc8d72c..df0bef18742d 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -179,7 +179,7 @@ bad_entry:
goto fail;
Eend:
p = (struct ufs_dir_entry *)(kaddr + offs);
- ufs_error(sb, __FUNCTION__,
+ ufs_error(sb, __func__,
"entry in directory #%lu spans the page boundary"
"offset=%lu",
dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
@@ -284,7 +284,7 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
kaddr += ufs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->d_reclen == 0) {
- ufs_error(dir->i_sb, __FUNCTION__,
+ ufs_error(dir->i_sb, __func__,
"zero-length directory entry");
ufs_put_page(page);
goto out;
@@ -356,7 +356,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
}
if (de->d_reclen == 0) {
- ufs_error(dir->i_sb, __FUNCTION__,
+ ufs_error(dir->i_sb, __func__,
"zero-length directory entry");
err = -EIO;
goto out_unlock;
@@ -456,7 +456,7 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct page *page = ufs_get_page(inode, n);
if (IS_ERR(page)) {
- ufs_error(sb, __FUNCTION__,
+ ufs_error(sb, __func__,
"bad page in #%lu",
inode->i_ino);
filp->f_pos += PAGE_CACHE_SIZE - offset;
@@ -475,7 +475,7 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
if (de->d_reclen == 0) {
- ufs_error(sb, __FUNCTION__,
+ ufs_error(sb, __func__,
"zero-length directory entry");
ufs_put_page(page);
return -EIO;
@@ -536,7 +536,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
while ((char*)de < (char*)dir) {
if (de->d_reclen == 0) {
- ufs_error(inode->i_sb, __FUNCTION__,
+ ufs_error(inode->i_sb, __func__,
"zero-length directory entry");
err = -EIO;
goto out;
@@ -633,7 +633,7 @@ int ufs_empty_dir(struct inode * inode)
while ((char *)de <= kaddr) {
if (de->d_reclen == 0) {
- ufs_error(inode->i_sb, __FUNCTION__,
+ ufs_error(inode->i_sb, __func__,
"zero-length directory entry: "
"kaddr=%p, de=%p\n", kaddr, de);
goto not_empty;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 5446b888fc8e..39f877898565 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -929,7 +929,7 @@ void ufs_delete_inode (struct inode * inode)
old_i_size = inode->i_size;
inode->i_size = 0;
if (inode->i_blocks && ufs_truncate(inode, old_i_size))
- ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
+ ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
ufs_free_inode (inode);
unlock_kernel();
return;
diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
index 1683d2bee614..8d974c4fd18b 100644
--- a/fs/ufs/swab.h
+++ b/fs/ufs/swab.h
@@ -40,25 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
return (__force __fs64)cpu_to_be64(n);
}
-static __inline u32
-fs64_add(struct super_block *sbp, u32 *n, int d)
-{
- if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- return *n = cpu_to_le64(le64_to_cpu(*n)+d);
- else
- return *n = cpu_to_be64(be64_to_cpu(*n)+d);
-}
-
-static __inline u32
-fs64_sub(struct super_block *sbp, u32 *n, int d)
-{
- if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- return *n = cpu_to_le64(le64_to_cpu(*n)-d);
- else
- return *n = cpu_to_be64(be64_to_cpu(*n)-d);
-}
-
-static __inline u32
+static inline u32
fs32_to_cpu(struct super_block *sbp, __fs32 n)
{
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
@@ -80,18 +62,18 @@ static inline void
fs32_add(struct super_block *sbp, __fs32 *n, int d)
{
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
+ le32_add_cpu((__le32 *)n, d);
else
- *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
+ be32_add_cpu((__be32 *)n, d);
}
static inline void
fs32_sub(struct super_block *sbp, __fs32 *n, int d)
{
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
+ le32_add_cpu((__le32 *)n, -d);
else
- *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
+ be32_add_cpu((__be32 *)n, -d);
}
static inline u16
@@ -116,18 +98,18 @@ static inline void
fs16_add(struct super_block *sbp, __fs16 *n, int d)
{
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+ le16_add_cpu((__le16 *)n, d);
else
- *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+ be16_add_cpu((__be16 *)n, d);
}
static inline void
fs16_sub(struct super_block *sbp, __fs16 *n, int d)
{
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
- *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
+ le16_add_cpu((__le16 *)n, -d);
else
- *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
+ be16_add_cpu((__be16 *)n, -d);
}
#endif /* _UFS_SWAB_H */
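
The helpers adopted above are behaviorally identical to the open-coded sequences they replace; a sketch of what le32_add_cpu() amounts to (the 16- and 64-bit and big-endian variants follow the same shape):

	static inline void le32_add_cpu_sketch(__le32 *var, u32 val)
	{
		*var = cpu_to_le32(le32_to_cpu(*var) + val);
	}

The gain is readability, plus the freedom for the generic implementation to use a cheaper form on hosts whose native byte order already matches.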
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index fcb9231bb9ed..244a1aaa940e 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -66,7 +66,7 @@ struct ufs_inode_info {
#ifdef CONFIG_UFS_DEBUG
# define UFSD(f, a...) { \
printk ("UFSD (%s, %d): %s:", \
- __FILE__, __LINE__, __FUNCTION__); \
+ __FILE__, __LINE__, __func__); \
printk (f, ## a); \
}
#else
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index cd450bea9f1a..5b66162d0747 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -176,15 +176,10 @@ static inline int vfat_is_used_badchars(const wchar_t *s, int len)
for (i = 0; i < len; i++)
if (vfat_bad_char(s[i]))
return -EINVAL;
- return 0;
-}
-static int vfat_valid_longname(const unsigned char *name, unsigned int len)
-{
- if (name[len - 1] == ' ')
+ if (s[i - 1] == ' ') /* last character cannot be space */
return -EINVAL;
- if (len >= 256)
- return -ENAMETOOLONG;
+
return 0;
}
@@ -477,7 +472,7 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
if (utf8) {
int name_len = strlen(name);
- *outlen = utf8_mbstowcs((wchar_t *)outname, name, PAGE_SIZE);
+ *outlen = utf8_mbstowcs((wchar_t *)outname, name, PATH_MAX);
/*
* We stripped '.'s before and set len appropriately,
@@ -485,11 +480,14 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
*/
*outlen -= (name_len - len);
+ if (*outlen > 255)
+ return -ENAMETOOLONG;
+
op = &outname[*outlen * sizeof(wchar_t)];
} else {
if (nls) {
for (i = 0, ip = name, op = outname, *outlen = 0;
- i < len && *outlen <= 260;
+ i < len && *outlen <= 255;
*outlen += 1)
{
if (escape && (*ip == ':')) {
@@ -525,18 +523,20 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
op += 2;
}
}
+ if (i < len)
+ return -ENAMETOOLONG;
} else {
for (i = 0, ip = name, op = outname, *outlen = 0;
- i < len && *outlen <= 260;
+ i < len && *outlen <= 255;
i++, *outlen += 1)
{
*op++ = *ip++;
*op++ = 0;
}
+ if (i < len)
+ return -ENAMETOOLONG;
}
}
- if (*outlen > 260)
- return -ENAMETOOLONG;
*longlen = *outlen;
if (*outlen % 13) {
@@ -565,7 +565,6 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
struct fat_mount_options *opts = &sbi->options;
struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
- unsigned long page;
unsigned char cksum, lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
@@ -574,15 +573,11 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
loff_t offset;
*nr_slots = 0;
- err = vfat_valid_longname(name, len);
- if (err)
- return err;
- page = __get_free_page(GFP_KERNEL);
- if (!page)
+ uname = __getname();
+ if (!uname)
return -ENOMEM;
- uname = (wchar_t *)page;
err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
opts->unicode_xlate, opts->utf8, sbi->nls_io);
if (err)
@@ -634,7 +629,7 @@ shortname:
de->starthi = cpu_to_le16(cluster >> 16);
de->size = 0;
out_free:
- free_page(page);
+ __putname(uname);
return err;
}
@@ -1003,7 +998,7 @@ static const struct inode_operations vfat_dir_inode_operations = {
.mkdir = vfat_mkdir,
.rmdir = vfat_rmdir,
.rename = vfat_rename,
- .setattr = fat_notify_change,
+ .setattr = fat_setattr,
.getattr = fat_getattr,
};
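
Background for the 255 limit used above: a VFAT long name is stored in directory slots of 13 UTF-16 code units each, and the on-disk format caps a name at 255 units, which appears to be why xlate_to_uni() now rejects oversized results right after conversion instead of via the old 260 check at the end. A sketch of the slot arithmetic that *longlen feeds (the helper name is hypothetical):

	static int vfat_nr_slots_sketch(int units)
	{
		/* each long-name slot carries 13 UTF-16 code units */
		return DIV_ROUND_UP(units, 13);
	}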
diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h
index 39a3e2a5017d..695a5ee4b5d3 100644
--- a/include/asm-alpha/bug.h
+++ b/include/asm-alpha/bug.h
@@ -1,14 +1,24 @@
#ifndef _ALPHA_BUG_H
#define _ALPHA_BUG_H
+#include <linux/linkage.h>
+
#ifdef CONFIG_BUG
#include <asm/pal.h>
/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
function loaded the GP, so this could fail in modules. */
-#define BUG() \
- __asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
- : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
+static inline void ATTRIB_NORET __BUG(const char *file, int line)
+{
+ __asm__ __volatile__(
+ "call_pal %0 # bugchk\n\t"
+ ".long %1\n\t.8byte %2"
+ : : "i" (PAL_bugchk), "i"(line), "i"(file));
+ for ( ; ; )
+ ;
+}
+
+#define BUG() __BUG(__FILE__, __LINE__)
#define HAVE_ARCH_BUG
#endif
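
Turning alpha's BUG() from a bare macro into an inline function keeps the file/line operands as immediates while giving the compiler something it can annotate: the trailing endless loop is what makes the ATTRIB_NORET claim provable. The same pattern in miniature (a generic sketch, not the alpha code):

	static inline void __attribute__((noreturn)) trap_and_stop(void)
	{
		__builtin_trap();	/* arch-specific trapping insn goes here */
		for (;;)
			;		/* unreachable, but proves "no return" */
	}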
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
index 7af2b8d25486..58e958fc7f1b 100644
--- a/include/asm-alpha/byteorder.h
+++ b/include/asm-alpha/byteorder.h
@@ -7,7 +7,7 @@
#ifdef __GNUC__
-static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch__swab32(__u32 x)
{
/*
* Unfortunately, we can't use the 6 instruction sequence
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 99037b032357..05ce5fba43e3 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -268,6 +268,7 @@ extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+extern inline int pte_special(pte_t pte) { return 0; }
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
@@ -275,6 +276,7 @@ extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); ret
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
+extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
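
The pte_special()/pte_mkspecial() stubs being added to each architecture here (constant 0, identity) let generic mm code test for "special" mappings unconditionally; on architectures without a spare PTE bit the whole branch compiles away. A hedged sketch of the kind of caller this enables, mirroring but not quoting the vm_normal_page() logic it was introduced for:

	static struct page *normal_page_sketch(pte_t pte, unsigned long pfn)
	{
		if (pte_special(pte))
			return NULL;	/* no struct page behind this mapping */
		return pfn_to_page(pfn);
	}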
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h
index 98b10bcf9f1b..b14cbda01dc3 100644
--- a/include/asm-arm/arch-sa1100/ide.h
+++ b/include/asm-arm/arch-sa1100/ide.h
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
memset(hw, 0, sizeof(*hw));
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = reg;
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = reg;
reg += regincr;
}
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ hw->io_ports.ctl_addr = ctrl_port;
if (irq)
*irq = 0;
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 5e0182485d8c..5571c13c3f3b 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -260,6 +260,7 @@ extern struct page *empty_zero_page;
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_special(pte) (0)
/*
* The following only works if pte_present() is not true.
@@ -280,6 +281,8 @@ PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h
index 3ae7b548fce7..c0e5e29417df 100644
--- a/include/asm-avr32/pgtable.h
+++ b/include/asm-avr32/pgtable.h
@@ -212,6 +212,10 @@ static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
/*
* The following only work if pte_present() is not true.
@@ -252,6 +256,10 @@ static inline pte_t pte_mkyoung(pte_t pte)
set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
index ea34e0d0a388..5366e6239328 100644
--- a/include/asm-cris/arch-v10/ide.h
+++ b/include/asm-cris/arch-v10/ide.h
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u
int i;
/* fill in ports for ATA addresses 0 to 7 */
-
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = data_port |
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = data_port |
IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
IO_STATE(R_ATA_CTRL_DATA, cs0, active);
}
/* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
-
- hw->io_ports[IDE_CONTROL_OFFSET] = data_port |
+ hw->io_ports.ctl_addr = data_port |
IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
IO_STATE(R_ATA_CTRL_DATA, cs1, active);
 /* what's this for? */
-
- hw->io_ports[IDE_IRQ_OFFSET] = 0;
+ hw->io_ports.irq_addr = 0;
}
static inline void ide_init_default_hwifs(void)
diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h
index a2607575681b..829e7a7d9fb9 100644
--- a/include/asm-cris/pgtable.h
+++ b/include/asm-cris/pgtable.h
@@ -115,6 +115,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WR
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -162,6 +163,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
}
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -229,7 +231,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* to find an entry in a page-table-directory */
-static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
}
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 4e219046fe42..83c51aba534b 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -380,6 +380,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
@@ -387,6 +388,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index f29a502f4a6c..ecf675a59d21 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,7 +16,14 @@
#define ARCH_NR_GPIOS 256
#endif
+static inline int gpio_is_valid(int number)
+{
+ /* only some non-negative numbers are valid */
+ return ((unsigned)number) < ARCH_NR_GPIOS;
+}
+
struct seq_file;
+struct module;
/**
* struct gpio_chip - abstract a GPIO controller
@@ -48,6 +55,7 @@ struct seq_file;
*/
struct gpio_chip {
char *label;
+ struct module *owner;
int (*direction_input)(struct gpio_chip *chip,
unsigned offset);
@@ -66,6 +74,7 @@ struct gpio_chip {
extern const char *gpiochip_is_requested(struct gpio_chip *chip,
unsigned offset);
+extern int __init __must_check gpiochip_reserve(int start, int ngpio);
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
@@ -97,6 +106,12 @@ extern int __gpio_cansleep(unsigned gpio);
#else
+static inline int gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
+
/* platforms that don't directly support access to GPIOs through I2C, SPI,
* or other blocking infrastructure can use these wrappers.
*/
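
A usage sketch for gpio_is_valid() (the board and function names are hypothetical): platform code can hand out a negative number to mean "no GPIO here", and one test rejects both that and an out-of-range positive.

	static int board_led_init(int gpio)
	{
		if (!gpio_is_valid(gpio))
			return -EINVAL;	/* negative, or >= ARCH_NR_GPIOS */
		return gpio_request(gpio, "led");
	}

The gpiolib variant relies on a cast trick: comparing ((unsigned)number) < ARCH_NR_GPIOS folds the negative case and the too-large case into a single unsigned comparison.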
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index de2ed2cbdd84..2fe292c275fe 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -21,6 +21,10 @@
#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
@@ -517,6 +521,14 @@ do { \
#define ia64_ptrd(addr, size) \
asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+#define ia64_ttag(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
new file mode 100644
index 000000000000..f28a9701f1cf
--- /dev/null
+++ b/include/asm-ia64/hugetlb.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ return (REGION_NUMBER(addr) == RGN_HPAGE ||
+ REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_IA64_HUGETLB_H */
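
The is_hugepage_only_range() test above leans on ia64's address-space layout: the top three bits of a virtual address select one of eight regions, and RGN_HPAGE is the region reserved for hugepage mappings. A sketch of the arithmetic behind REGION_NUMBER() (illustrative, under that 3-bit-region assumption):

	/* the top 3 bits of a 64-bit virtual address name its region */
	#define REGION_NUMBER_SKETCH(addr)	(((unsigned long)(addr)) >> 61)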
diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h
index 030d29b4b26b..eb2d3559d089 100644
--- a/include/asm-ia64/kvm.h
+++ b/include/asm-ia64/kvm.h
@@ -1,6 +1,205 @@
-#ifndef __LINUX_KVM_IA64_H
-#define __LINUX_KVM_IA64_H
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
-/* ia64 does not support KVM */
+/*
+ * asm-ia64/kvm.h: kvm structure definitions for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS 24
+
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+
+#define KVM_CONTEXT_SIZE 8*1024
+
+union context {
+ /* 8K size */
+ char dummy[KVM_CONTEXT_SIZE];
+ struct {
+ unsigned long psr;
+ unsigned long pr;
+ unsigned long caller_unat;
+ unsigned long pad;
+ unsigned long gr[32];
+ unsigned long ar[128];
+ unsigned long br[8];
+ unsigned long cr[128];
+ unsigned long rr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long pkr[8];
+ struct ia64_fpreg fr[128];
+ };
+};
+
+struct thash_data {
+ union {
+ struct {
+ unsigned long p : 1; /* 0 */
+ unsigned long rv1 : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long rv2 : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ unsigned long ig1 : 11; /* 53-63 */
+ };
+ struct {
+ unsigned long __rv1 : 53; /* 0-52 */
+ unsigned long contiguous : 1; /*53 */
+ unsigned long tc : 1; /* 54 TR or TC */
+ unsigned long cl : 1;
+ /* 55 I side or D side cache line */
+ unsigned long len : 4; /* 56-59 */
+ unsigned long io : 1; /* 60 entry is for io or not */
+ unsigned long nomap : 1;
+ /* 61 entry can't be inserted into machine TLB. */
+ unsigned long checked : 1;
+ /* 62 for VTLB/VHPT sanity check */
+ unsigned long invalid : 1;
+ /* 63 invalid entry */
+ };
+ unsigned long page_flags;
+ }; /* same for VHPT and TLB */
+
+ union {
+ struct {
+ unsigned long rv3 : 2;
+ unsigned long ps : 6;
+ unsigned long key : 24;
+ unsigned long rv4 : 32;
+ };
+ unsigned long itir;
+ };
+ union {
+ struct {
+ unsigned long ig2 : 12;
+ unsigned long vpn : 49;
+ unsigned long vrn : 3;
+ };
+ unsigned long ifa;
+ unsigned long vadr;
+ struct {
+ unsigned long tag : 63;
+ unsigned long ti : 1;
+ };
+ unsigned long etag;
+ };
+ union {
+ struct thash_data *next;
+ unsigned long rid;
+ unsigned long gpaddr;
+ };
+};
+
+#define NITRS 8
+#define NDTRS 8
+
+struct saved_vpd {
+ unsigned long vhpi;
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long vcr[128];
+};
+
+struct kvm_regs {
+ char *saved_guest;
+ char *saved_stack;
+ struct saved_vpd vpd;
+	/* Arch regs */
+ int mp_state;
+ unsigned long vmm_rr;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+
+ char irq_check;
+ unsigned long saved_itc;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+ unsigned long last_itc;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+	unsigned long fp_psr; /* used for lazy float register */
+	unsigned long saved_gp;
+	/* for physical emulation */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
#endif
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644
index 000000000000..c082c208c1f3
--- /dev/null
+++ b/include/asm-ia64/kvm_host.h
@@ -0,0 +1,524 @@
+/*
+ * kvm_host.h: used for kvm module, and hold ia64-specific sections.
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __ASM_KVM_HOST_H
+#define __ASM_KVM_HOST_H
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+
+/* define exit reasons from vmm to kvm*/
+#define EXIT_REASON_VM_PANIC 0
+#define EXIT_REASON_MMIO_INSTRUCTION 1
+#define EXIT_REASON_PAL_CALL 2
+#define EXIT_REASON_SAL_CALL 3
+#define EXIT_REASON_SWITCH_RR6 4
+#define EXIT_REASON_VM_DESTROY 5
+#define EXIT_REASON_EXTERNAL_INTERRUPT 6
+#define EXIT_REASON_IPI 7
+#define EXIT_REASON_PTC_G 8
+
+/* Define VMM address space and VM data space. */
+#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SHIFT 24
+#define KVM_VMM_BASE 0xD000000000000000UL
+#define VMM_SIZE (8UL<<20)
+
+/*
+ * Define the base address of vm_buffer, which is used by PAL services.
+ * Note: vm_buffer lives in the VMM block; its size must be < 8M.
+ */
+#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
+#define KVM_VM_BUFFER_SIZE (8UL<<20)
+
+/* Define virtual machine data layout. */
+#define KVM_VM_DATA_SHIFT 24
+#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
+
+
+#define KVM_P2M_BASE KVM_VM_DATA_BASE
+#define KVM_P2M_OFS 0
+#define KVM_P2M_SIZE (8UL << 20)
+
+#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
+#define KVM_VHPT_OFS KVM_P2M_SIZE
+#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
+#define VHPT_SHIFT 18
+#define VHPT_SIZE (1UL << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
+
+#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
+#define VTLB_SHIFT 17
+#define VTLB_SIZE (1UL<<VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
+
+#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_BLOCK_SIZE (2UL<<20)
+#define VPD_SHIFT 16
+#define VPD_SIZE (1UL<<VPD_SHIFT)
+
+#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
+#define VCPU_SHIFT 18
+#define VCPU_SIZE (1UL<<VCPU_SHIFT)
+#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
+
+#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_BLOCK_SIZE (1UL<<19)
+
+#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
+
+/* Get vpd, vhpt, tlb, vcpu base addresses */
+#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
+#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
+#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
+#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+
+/* I/O section definitions */
+#define IOREQ_READ 1
+#define IOREQ_WRITE 0
+
+#define STATE_IOREQ_NONE 0
+#define STATE_IOREQ_READY 1
+#define STATE_IOREQ_INPROCESS 2
+#define STATE_IORESP_READY 3
+
+/* Guest physical address layout. */
+#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
+#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
+#define GPFN_PIB (3UL << 60) /* PIB base */
+#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
+#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
+#define GPFN_GFW (6UL << 60) /* Guest Firmware */
+#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
+
+#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
+#define INVALID_MFN (~0UL)
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
+#define MMIO_START (3 * MEM_G)
+#define MMIO_SIZE (512 * MEM_M)
+#define VGA_IO_START 0xA0000UL
+#define VGA_IO_SIZE 0x20000
+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE (64 * MEM_M)
+#define IO_SAPIC_START 0xfec00000UL
+#define IO_SAPIC_SIZE 0x100000
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x200000
+#define GFW_START (4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE (16 * MEM_M)
+
+/* Delivery mode, defined for ioapic.c */
+#define dest_Fixed IOSAPIC_FIXED
+#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
+
+#define NMI_VECTOR 2
+#define ExtINT_VECTOR 0
+#define NULL_VECTOR (-1)
+#define IA64_SPURIOUS_INT_VECTOR 0x0f
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
+
+/*
+ * Delivery mode
+ */
+#define SAPIC_DELIV_SHIFT 8
+#define SAPIC_FIXED 0x0
+#define SAPIC_LOWEST_PRIORITY 0x1
+#define SAPIC_PMI 0x2
+#define SAPIC_NMI 0x4
+#define SAPIC_INIT 0x5
+#define SAPIC_EXTINT 0x7
+
+/*
+ * vcpu->requests bit members for arch
+ */
+#define KVM_REQ_PTC_G 32
+#define KVM_REQ_RESUME 33
+
+#define KVM_PAGES_PER_HPAGE 1
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_guest_debug{
+};
+
+struct kvm_mmio_req {
+ uint64_t addr; /* physical address */
+ uint64_t size; /* size in bytes */
+ uint64_t data; /* data (or paddr of data) */
+ uint8_t state:4;
+ uint8_t dir:1; /* 1=read, 0=write */
+};
+
+/* PAL data structure */
+struct kvm_pal_call {
+	/* In area */
+ uint64_t gr28;
+ uint64_t gr29;
+ uint64_t gr30;
+ uint64_t gr31;
+	/* Out area */
+ struct ia64_pal_retval ret;
+};
+
+/* SAL data structure */
+struct kvm_sal_call {
+	/* In area */
+ uint64_t in0;
+ uint64_t in1;
+ uint64_t in2;
+ uint64_t in3;
+ uint64_t in4;
+ uint64_t in5;
+ uint64_t in6;
+ uint64_t in7;
+ struct sal_ret_values ret;
+};
+
+/* Guest changes rr6 */
+struct kvm_switch_rr6 {
+ uint64_t old_rr;
+ uint64_t new_rr;
+};
+
+union ia64_ipi_a{
+ unsigned long val;
+ struct {
+ unsigned long rv : 3;
+ unsigned long ir : 1;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ib_base : 44;
+ };
+};
+
+union ia64_ipi_d {
+ unsigned long val;
+ struct {
+ unsigned long vector : 8;
+ unsigned long dm : 3;
+ unsigned long ig : 53;
+ };
+};
+
+/* IPI check exit data */
+struct kvm_ipi_data {
+ union ia64_ipi_a addr;
+ union ia64_ipi_d data;
+};
+
+/* Global purge data */
+struct kvm_ptc_g {
+ unsigned long vaddr;
+ unsigned long rr;
+ unsigned long ps;
+ struct kvm_vcpu *vcpu;
+};
+
+/* Exit control data */
+struct exit_ctl_data {
+ uint32_t exit_reason;
+ uint32_t vm_status;
+ union {
+ struct kvm_mmio_req ioreq;
+ struct kvm_pal_call pal_data;
+ struct kvm_sal_call sal_data;
+ struct kvm_switch_rr6 rr_data;
+ struct kvm_ipi_data ipi_data;
+ struct kvm_ptc_g ptc_g_data;
+ } u;
+};
+
+union pte_flags {
+ unsigned long val;
+ struct {
+ unsigned long p : 1; /*0 */
+ unsigned long : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ };
+};
+
+union ia64_pta {
+ unsigned long val;
+ struct {
+ unsigned long ve : 1;
+ unsigned long reserved0 : 1;
+ unsigned long size : 6;
+ unsigned long vf : 1;
+ unsigned long reserved1 : 6;
+ unsigned long base : 49;
+ };
+};
+
+struct thash_cb {
+ /* THASH base information */
+ struct thash_data *hash; /* hash table pointer */
+ union ia64_pta pta;
+ int num;
+};
+
+struct kvm_vcpu_stat {
+};
+
+struct kvm_vcpu_arch {
+ int launched;
+ int last_exit;
+ int last_run_cpu;
+ int vmm_tr_slot;
+ int vm_tr_slot;
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+ int mp_state;
+
+#define MAX_PTC_G_NUM 3
+ int ptc_g_count;
+ struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
+
+	/* halt timer to wake up sleepy vcpus */
+ struct hrtimer hlt_timer;
+ long ht_active;
+
+ struct kvm_lapic *apic; /* kernel irqchip context */
+ struct vpd *vpd;
+
+	/* Exit data for vmm_transition */
+ struct exit_ctl_data exit_data;
+
+ cpumask_t cache_coherent_map;
+
+ unsigned long vmm_rr;
+ unsigned long host_rr6;
+ unsigned long psbits[8];
+ unsigned long cr_iipa;
+ unsigned long cr_isr;
+ unsigned long vsa_base;
+ unsigned long dirty_log_lock_pa;
+ unsigned long __gp;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+ /* purge all */
+ unsigned long ptce_base;
+ unsigned long ptce_count[2];
+ unsigned long ptce_stride[2];
+ /* itc/itm */
+ unsigned long last_itc;
+ long itc_offset;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+	unsigned long fp_psr; /* used for lazy float register */
+	unsigned long saved_gp;
+	/* for physical emulation */
+ int mode_flags;
+ struct thash_cb vtlb;
+ struct thash_cb vhpt;
+ char irq_check;
+ char irq_new_pending;
+
+ unsigned long opcode;
+ unsigned long cause;
+ union context host;
+ union context guest;
+};
+
+struct kvm_vm_stat {
+ u64 remote_tlb_flush;
+};
+
+struct kvm_sal_data {
+ unsigned long boot_ip;
+ unsigned long boot_gp;
+};
+
+struct kvm_arch {
+ unsigned long vm_base;
+ unsigned long metaphysical_rr0;
+ unsigned long metaphysical_rr4;
+ unsigned long vmm_init_rr;
+ unsigned long vhpt_base;
+ unsigned long vtlb_base;
+ unsigned long vpd_base;
+ spinlock_t dirty_log_lock;
+ struct kvm_ioapic *vioapic;
+ struct kvm_vm_stat stat;
+ struct kvm_sal_data rdv_sal_data;
+};
+
+union cpuid3_t {
+ u64 value;
+ struct {
+ u64 number : 8;
+ u64 revision : 8;
+ u64 model : 8;
+ u64 family : 8;
+ u64 archrev : 8;
+ u64 rv : 24;
+ };
+};
+
+struct kvm_pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct ia64_fpreg f6; /* scratch */
+ struct ia64_fpreg f7; /* scratch */
+ struct ia64_fpreg f8; /* scratch */
+ struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long pad0; /* alignment pad */
+};
+
+static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
+{
+ return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+}
+
+typedef int kvm_vmm_entry(void);
+typedef void kvm_tramp_entry(union context *host, union context *guest);
+
+struct kvm_vmm_info{
+ struct module *module;
+ kvm_vmm_entry *vmm_entry;
+ kvm_tramp_entry *tramp_entry;
+ unsigned long vmm_ivt;
+};
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_sal_emul(struct kvm_vcpu *vcpu);
+
+#endif
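
A hedged reading of vcpu_regs() above: the vcpu structure and its kernel stack share one IA64_STK_OFFSET-sized region with the register save area at the very top, so the expression is "stack end, minus one struct". Subtracting 1 from a typed pointer steps back a full sizeof(struct kvm_pt_regs):

	struct kvm_pt_regs *regs =
		(struct kvm_pt_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1;
	/* equivalent to: (stack top) - sizeof(*regs) */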
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644
index 000000000000..9f9796bb3441
--- /dev/null
+++ b/include/asm-ia64/kvm_para.h
@@ -0,0 +1,29 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * asm-ia64/kvm_para.h
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 4999a6c63775..36f39321b768 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -54,9 +54,6 @@
# define HPAGE_MASK (~(HPAGE_SIZE - 1))
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
@@ -153,9 +150,6 @@ typedef union ia64_va {
# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(mm, addr, len) \
- (REGION_NUMBER(addr) == RGN_HPAGE || \
- REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
extern unsigned int hpage_shift;
#endif
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index ed70862ea247..7a9bff47564f 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -302,6 +302,8 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+#define pte_special(pte) 0
+
/*
* Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
* access rights:
@@ -313,6 +315,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte) (__pte(pte_val(pte)))
+#define pte_mkspecial(pte) (pte)
/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 741f7ecb986a..6aff126fc07e 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -119,6 +119,69 @@ struct ia64_psr {
__u64 reserved4 : 19;
};
+union ia64_isr {
+ __u64 val;
+ struct {
+ __u64 code : 16;
+ __u64 vector : 8;
+ __u64 reserved1 : 8;
+ __u64 x : 1;
+ __u64 w : 1;
+ __u64 r : 1;
+ __u64 na : 1;
+ __u64 sp : 1;
+ __u64 rs : 1;
+ __u64 ir : 1;
+ __u64 ni : 1;
+ __u64 so : 1;
+ __u64 ei : 2;
+ __u64 ed : 1;
+ __u64 reserved2 : 20;
+ };
+};
+
+union ia64_lid {
+ __u64 val;
+ struct {
+ __u64 rv : 16;
+ __u64 eid : 8;
+ __u64 id : 8;
+ __u64 ig : 32;
+ };
+};
+
+union ia64_tpr {
+ __u64 val;
+ struct {
+ __u64 ig0 : 4;
+ __u64 mic : 4;
+ __u64 rsv : 8;
+ __u64 mmi : 1;
+ __u64 ig1 : 47;
+ };
+};
+
+union ia64_itir {
+ __u64 val;
+ struct {
+ __u64 rv3 : 2; /* 0-1 */
+ __u64 ps : 6; /* 2-7 */
+ __u64 key : 24; /* 8-31 */
+ __u64 rv4 : 32; /* 32-63 */
+ };
+};
+
+union ia64_rr {
+ __u64 val;
+ struct {
+ __u64 ve : 1; /* enable hw walker */
+ __u64 reserved0: 1; /* reserved */
+ __u64 ps : 6; /* log page size */
+ __u64 rid : 24; /* region id */
+ __u64 reserved1: 32; /* reserved */
+ };
+};
+
/*
* CPU type, hardware bug flags, and per-CPU state. Frequently used
* state comes earlier:
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 86505387be08..e6359c566b50 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -214,6 +214,11 @@ static inline int pte_file(pte_t pte)
return pte_val(pte) & _PAGE_FILE;
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~_PAGE_DIRTY;
@@ -250,6 +255,11 @@ static inline pte_t pte_mkwrite(pte_t pte)
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h
index 13135d4821d8..8e9a8a754dde 100644
--- a/include/asm-m68k/motorola_pgtable.h
+++ b/include/asm-m68k/motorola_pgtable.h
@@ -168,6 +168,7 @@ static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY);
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
@@ -185,6 +186,7 @@ static inline pte_t pte_mkcache(pte_t pte)
pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index b766fc261bde..f847ec732d62 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -169,6 +169,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEA
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
@@ -181,6 +182,7 @@ static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE
//static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
// until then, use:
static inline pte_t pte_mkcache(pte_t pte) { return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 17a7703a2969..782221e57c0a 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -285,6 +285,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
#endif
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
 * Macro to mark a page protection value as "uncacheable". Note
diff --git a/include/asm-mips/vr41xx/siu.h b/include/asm-mips/vr41xx/siu.h
index 98cdb4096485..da9f6e373409 100644
--- a/include/asm-mips/vr41xx/siu.h
+++ b/include/asm-mips/vr41xx/siu.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -49,4 +49,10 @@ typedef enum {
extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed);
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_early_setup(struct uart_port *port);
+#else
+static inline void vr41xx_siu_early_setup(struct uart_port *port) {}
+#endif
+
#endif /* __NEC_VR41XX_SIU_H */
diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h
index 88b492f6ea9c..22be64971cc6 100644
--- a/include/asm-mips/vr41xx/vr41xx.h
+++ b/include/asm-mips/vr41xx/vr41xx.h
@@ -7,7 +7,7 @@
* Copyright (C) 2001, 2002 Paul Mundt
* Copyright (C) 2002 MontaVista Software, Inc.
* Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -143,4 +143,10 @@ extern void vr41xx_disable_csiint(uint16_t mask);
extern void vr41xx_enable_bcuint(void);
extern void vr41xx_disable_bcuint(void);
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_setup(void);
+#else
+static inline void vr41xx_siu_setup(void) {}
+#endif
+
#endif /* __NEC_VR41XX_H */
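
Both SIU headers use the same guard pattern: the real function is declared only when the console driver is configured in, and an empty static inline stands in otherwise, so callers never need an #ifdef at the call site. Caller-side sketch (the platform hook name is hypothetical):

	void __init plat_setup_sketch(void)
	{
		/* compiles to nothing when CONFIG_SERIAL_VR41XX_CONSOLE is off */
		vr41xx_siu_setup();
	}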
diff --git a/include/asm-mn10300/pgtable.h b/include/asm-mn10300/pgtable.h
index 375c4941deda..6dc30fc827c4 100644
--- a/include/asm-mn10300/pgtable.h
+++ b/include/asm-mn10300/pgtable.h
@@ -224,6 +224,7 @@ static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER;
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
+static inline int pte_special(pte_t pte){ return 0; }
/*
* The following only works if pte_present() is not true.
@@ -265,6 +266,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
__FILE__, __LINE__, pte_val(e))
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index dc86adbec916..470a4b88124d 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -323,6 +323,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
@@ -330,6 +331,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
new file mode 100644
index 000000000000..649c6c3b87b3
--- /dev/null
+++ b/include/asm-powerpc/hugetlb.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#include <asm/page.h>
+
+
+int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+ unsigned long len);
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
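prepare_hugepage_range() above accepts only hugepage-aligned regions. A self-contained userspace sketch of the same check, assuming 16 MiB huge pages (HPAGE_SHIFT is arch-defined and not part of this header):

	#include <stdio.h>

	#define HPAGE_SHIFT 24				/* assumed: 16 MiB huge pages */
	#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))
	#define EINVAL	    22

	static int check_hugepage_range(unsigned long addr, unsigned long len)
	{
		if (len & ~HPAGE_MASK)
			return -EINVAL;		/* length not a hugepage multiple */
		if (addr & ~HPAGE_MASK)
			return -EINVAL;		/* start not hugepage-aligned */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_hugepage_range(0x1000000UL, 0x2000000UL)); /* 0 */
		printf("%d\n", check_hugepage_range(0x1000100UL, 0x1000000UL)); /* -22 */
		return 0;
	}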
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h
index d1b530fbf8dd..f993e4198d5c 100644
--- a/include/asm-powerpc/kvm.h
+++ b/include/asm-powerpc/kvm.h
@@ -1,6 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
#ifndef __LINUX_KVM_POWERPC_H
#define __LINUX_KVM_POWERPC_H
-/* powerpc does not support KVM */
+#include <asm/types.h>
+
+struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+ __u64 fpr[32];
+};
-#endif
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644
index 000000000000..2197764796d9
--- /dev/null
+++ b/include/asm-powerpc/kvm_asm.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
+#define VCPU_TLB_PGSZ PPC44x_TLB_64K
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_NV RESUME_FLAG_NV
+#define RESUME_HOST RESUME_FLAG_HOST
+#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#endif /* __POWERPC_KVM_ASM_H__ */
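The RESUME_* values form a two-bit protocol between exit handlers and the low-level resume path: one bit selects guest vs. host, the other requests a reload of the guest's nonvolatile registers. A sketch of the decoding (the flag values mirror the definitions above; the helpers are illustrative):

	#define RESUME_FLAG_NV	 (1 << 0)	/* reload guest nonvolatile state */
	#define RESUME_FLAG_HOST (1 << 1)	/* return to the host */

	static inline int resume_to_host(int r)
	{
		return r & RESUME_FLAG_HOST;
	}

	static inline int reload_nonvolatiles(int r)
	{
		return r & RESUME_FLAG_NV;
	}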
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644
index 000000000000..04ffbb8e0a35
--- /dev/null
+++ b/include/asm-powerpc/kvm_host.h
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_asm.h>
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+/* We don't currently support large pages. */
+#define KVM_PAGES_PER_HPAGE (1<<31)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 sum_exits;
+ u32 mmio_exits;
+ u32 dcr_exits;
+ u32 signal_exits;
+ u32 light_exits;
+ /* Account for special types of light exits: */
+ u32 itlb_real_miss_exits;
+ u32 itlb_virt_miss_exits;
+ u32 dtlb_real_miss_exits;
+ u32 dtlb_virt_miss_exits;
+ u32 syscall_exits;
+ u32 isi_exits;
+ u32 dsi_exits;
+ u32 emulated_inst_exits;
+ u32 dec_exits;
+ u32 ext_intr_exits;
+};
+
+struct tlbe {
+ u32 tid; /* Only the low 8 bits are used. */
+ u32 word0;
+ u32 word1;
+ u32 word2;
+};
+
+struct kvm_arch {
+};
+
+struct kvm_vcpu_arch {
+ /* Unmodified copy of the guest's TLB. */
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ /* TLB that's actually used when the guest is running. */
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+ /* Pages which are referenced in the shadow TLB. */
+ struct page *shadow_pages[PPC44x_TLB_SIZE];
+ /* Copy of the host's TLB. */
+ struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+ u32 host_stack;
+ u32 host_pid;
+
+ u64 fpr[32];
+ u32 gpr[32];
+
+ u32 pc;
+ u32 cr;
+ u32 ctr;
+ u32 lr;
+ u32 xer;
+
+ u32 msr;
+ u32 mmucr;
+ u32 sprg0;
+ u32 sprg1;
+ u32 sprg2;
+ u32 sprg3;
+ u32 sprg4;
+ u32 sprg5;
+ u32 sprg6;
+ u32 sprg7;
+ u32 srr0;
+ u32 srr1;
+ u32 csrr0;
+ u32 csrr1;
+ u32 dsrr0;
+ u32 dsrr1;
+ u32 dear;
+ u32 esr;
+ u32 dec;
+ u32 decar;
+ u32 tbl;
+ u32 tbu;
+ u32 tcr;
+ u32 tsr;
+ u32 ivor[16];
+ u32 ivpr;
+ u32 pir;
+ u32 pid;
+ u32 pvr;
+ u32 ccr0;
+ u32 ccr1;
+ u32 dbcr0;
+ u32 dbcr1;
+
+ u32 last_inst;
+ u32 fault_dear;
+ u32 fault_esr;
+ gpa_t paddr_accessed;
+
+ u8 io_gpr; /* GPR used as IO source/target */
+ u8 mmio_is_bigendian;
+ u8 dcr_needed;
+ u8 dcr_is_write;
+
+ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+ struct timer_list dec_timer;
+ unsigned long pending_exceptions;
+};
+
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644
index 000000000000..2d48f6a63d0b
--- /dev/null
+++ b/include/asm-powerpc/kvm_para.h
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644
index 000000000000..7ac820308a7e
--- /dev/null
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+struct kvm_tlb {
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+};
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_DCR, /* kvm_run filled with DCR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+};
+
+extern const unsigned char exception_priority[];
+extern const unsigned char priority_exception[];
+
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern char kvmppc_handlers_start[];
+extern unsigned long kvmppc_handler_len;
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_bigendian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian);
+
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
+ u64 asid, u32 flags);
+extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+
+extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+ kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+ vcpu->arch.msr = new_msr;
+}
+
+#endif /* __POWERPC_KVM_PPC_H__ */
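kvmppc_queue_exception() records pending exceptions by priority in a bitmap, so the delivery side can pick the most urgent one with a lowest-set-bit scan. A hedged sketch of that scan (kvmppc_check_and_deliver_interrupts() itself is declared above but not shown in this patch):

	/* Illustrative only: lowest set bit = highest-priority pending exception. */
	static int next_pending_priority(unsigned long pending)
	{
		int prio;

		for (prio = 0; prio < (int)(8 * sizeof(pending)); prio++)
			if (pending & (1UL << prio))
				return prio;
		return -1;			/* nothing pending */
	}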
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
index c8b02d97f753..a825524c981a 100644
--- a/include/asm-powerpc/mmu-44x.h
+++ b/include/asm-powerpc/mmu-44x.h
@@ -53,6 +53,8 @@
#ifndef __ASSEMBLY__
+extern unsigned int tlb_44x_hwater;
+
typedef struct {
unsigned long id;
unsigned long vdso_base;
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 67834eae5702..25af4fc8daf4 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -128,11 +128,6 @@ extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
#define slice_mm_new_context(mm) ((mm)->context.id == 0)
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-extern int is_hugepage_only_range(struct mm_struct *m,
- unsigned long addr,
- unsigned long len);
-
#endif /* __ASSEMBLY__ */
#else
#define slice_init()
@@ -146,8 +141,6 @@ do { \
#ifdef CONFIG_HUGETLB_PAGE
-#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index daea7692d070..7c97b5a08d08 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -504,6 +504,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -521,6 +522,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index dd4c26dc57d2..27f18695f7d6 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -239,6 +239,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_special(pte_t pte) { return 0; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -257,6 +258,8 @@ static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ return pte; }
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 70435d32129a..55f9d38e3bf8 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -483,6 +483,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -500,6 +501,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index e92b429d2be1..13c9805349f1 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,6 +7,7 @@ header-y += tape390.h
header-y += ucontext.h
header-y += vtoc.h
header-y += zcrypt.h
+header-y += kvm.h
unifdef-y += cmb.h
unifdef-y += debug.h
diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h
index 573f2a351386..d74002f95794 100644
--- a/include/asm-s390/kvm.h
+++ b/include/asm-s390/kvm.h
@@ -1,6 +1,45 @@
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
-/* s390 does not support KVM */
+/*
+ * asm-s390/kvm.h - KVM s390 specific structures and definitions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+#include <asm/types.h>
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+ /* no PIC for s390 */
+};
+
+struct kvm_ioapic_state {
+ /* no IOAPIC for s390 */
+};
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ /* general purpose regs for s390 */
+ __u64 gprs[16];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+ __u32 acrs[16];
+ __u64 crs[16];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+ __u32 fpc;
+ __u64 fprs[16];
+};
#endif
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644
index 000000000000..f8204a4f2e02
--- /dev/null
+++ b/include/asm-s390/kvm_host.h
@@ -0,0 +1,234 @@
+/*
+ * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+#include <linux/kvm_host.h>
+#include <asm/debug.h>
+
+#define KVM_MAX_VCPUS 64
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+struct kvm_guest_debug {
+};
+
+struct sca_entry {
+ atomic_t scn;
+ __u64 reserved;
+ __u64 sda;
+ __u64 reserved2[2];
+} __attribute__((packed));
+
+
+struct sca_block {
+ __u64 ipte_control;
+ __u64 reserved[5];
+ __u64 mcn;
+ __u64 reserved2;
+ struct sca_entry cpu[64];
+} __attribute__((packed));
+
+#define KVM_PAGES_PER_HPAGE 256
+
+#define CPUSTAT_HOST 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+
+struct sie_block {
+ atomic_t cpuflags; /* 0x0000 */
+ __u32 prefix; /* 0x0004 */
+ __u8 reserved8[32]; /* 0x0008 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u8 reserved40[4]; /* 0x0040 */
+#define LCTL_CR0 0x8000
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+ __u32 ictl; /* 0x0048 */
+ __u32 eca; /* 0x004c */
+ __u8 icptcode; /* 0x0050 */
+ __u8 reserved51; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54[2]; /* 0x0054 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+ __u8 reserved60; /* 0x0060 */
+ __u8 ecb; /* 0x0061 */
+ __u8 reserved62[2]; /* 0x0062 */
+ __u32 scaol; /* 0x0064 */
+ __u8 reserved68[4]; /* 0x0068 */
+ __u32 todpr; /* 0x006c */
+ __u8 reserved70[16]; /* 0x0070 */
+ __u64 gmsor; /* 0x0080 */
+ __u64 gmslm; /* 0x0088 */
+ psw_t gpsw; /* 0x0090 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[30]; /* 0x00b0 */
+ __u16 iprcc; /* 0x00ce */
+ __u8 reservedd0[48]; /* 0x00d0 */
+ __u64 gcr[16]; /* 0x0100 */
+ __u64 gbea; /* 0x0180 */
+ __u8 reserved188[120]; /* 0x0188 */
+} __attribute__((packed));
+
+struct kvm_vcpu_stat {
+ u32 exit_userspace;
+ u32 exit_external_request;
+ u32 exit_external_interrupt;
+ u32 exit_stop_request;
+ u32 exit_validity;
+ u32 exit_instruction;
+ u32 instruction_lctl;
+ u32 instruction_lctg;
+ u32 exit_program_interruption;
+ u32 exit_instr_and_program;
+ u32 deliver_emergency_signal;
+ u32 deliver_service_signal;
+ u32 deliver_virtio_interrupt;
+ u32 deliver_stop_signal;
+ u32 deliver_prefix_signal;
+ u32 deliver_restart_signal;
+ u32 deliver_program_int;
+ u32 exit_wait_state;
+ u32 instruction_stidp;
+ u32 instruction_spx;
+ u32 instruction_stpx;
+ u32 instruction_stap;
+ u32 instruction_storage_key;
+ u32 instruction_stsch;
+ u32 instruction_chsc;
+ u32 instruction_stsi;
+ u32 instruction_stfl;
+ u32 instruction_sigp_sense;
+ u32 instruction_sigp_emergency;
+ u32 instruction_sigp_stop;
+ u32 instruction_sigp_arch;
+ u32 instruction_sigp_prefix;
+ u32 instruction_sigp_restart;
+ u32 diagnose_44;
+};
+
+struct io_info {
+ __u16 subchannel_id; /* 0x0b8 */
+ __u16 subchannel_nr; /* 0x0ba */
+ __u32 io_int_parm; /* 0x0bc */
+ __u32 io_int_word; /* 0x0c0 */
+};
+
+struct ext_info {
+ __u32 ext_params;
+ __u64 ext_params2;
+};
+
+#define PGM_OPERATION 0x01
+#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_EXECUTE 0x03
+#define PGM_PROTECTION 0x04
+#define PGM_ADDRESSING 0x05
+#define PGM_SPECIFICATION 0x06
+#define PGM_DATA 0x07
+
+struct pgm_info {
+ __u16 code;
+};
+
+struct prefix_info {
+ __u32 address;
+};
+
+struct interrupt_info {
+ struct list_head list;
+ u64 type;
+ union {
+ struct io_info io;
+ struct ext_info ext;
+ struct pgm_info pgm;
+ struct prefix_info prefix;
+ };
+};
+
+/* for local_interrupt.action_flags */
+#define ACTION_STORE_ON_STOP 1
+#define ACTION_STOP_ON_STOP 2
+
+struct local_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ struct float_interrupt *float_int;
+ int timer_due; /* event indicator for waitqueue below */
+ wait_queue_head_t wq;
+ atomic_t *cpuflags;
+ unsigned int action_bits;
+};
+
+struct float_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ int next_rr_cpu;
+ unsigned long idle_mask[(64 + sizeof(long) - 1) / sizeof(long)];
+ struct local_interrupt *local_int[64];
+};
+
+
+struct kvm_vcpu_arch {
+ struct sie_block *sie_block;
+ unsigned long guest_gprs[16];
+ s390_fp_regs host_fpregs;
+ unsigned int host_acrs[NUM_ACRS];
+ s390_fp_regs guest_fpregs;
+ unsigned int guest_acrs[NUM_ACRS];
+ struct local_interrupt local_int;
+ struct timer_list ckc_timer;
+ union {
+ cpuid_t cpu_id;
+ u64 stidp_data;
+ };
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_arch {
+ unsigned long guest_origin;
+ unsigned long guest_memsize;
+ struct sca_block *sca;
+ debug_info_t *dbf;
+ struct float_interrupt float_int;
+};
+
+extern int sie64a(struct sie_block *, __u64 *);
+#endif
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644
index 000000000000..2c503796b619
--- /dev/null
+++ b/include/asm-s390/kvm_para.h
@@ -0,0 +1,150 @@
+/*
+ * asm-s390/kvm_para.h - definition for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __S390_KVM_PARA_H
+#define __S390_KVM_PARA_H
+
+/*
+ * Hypercalls for KVM on s390. The calling convention is similar to the
+ * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
+ * as hypercall number and R7 as parameter 6. The return value is
+ * written to R2. We use the diagnose instruction as hypercall. To avoid
+ * conflicts with existing diagnoses for LPAR and z/VM, we do not use
+ * the instruction encoded number, but specify the number in R1 and
+ * use 0x500 as the KVM hypercall.
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+static inline long kvm_hypercall0(unsigned long nr)
+{
+ register unsigned long __nr asm("1") = nr;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr): "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+ unsigned long p2)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
+ : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3) : "memory", "cc");
+ return __rc;
+}
+
+
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5,
+ unsigned long p6)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register unsigned long __p6 asm("7") = p6;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
+ : "memory", "cc");
+ return __rc;
+}
+
+/* KVM on s390 always has paravirtualization enabled */
+static inline int kvm_para_available(void)
+{
+ return 1;
+}
+
+/* No feature bits are currently assigned for kvm on s390 */
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __S390_KVM_PARA_H */
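Given the convention documented above, a guest driver issues a hypercall by loading a number into R1 and up to six parameters into R2-R7. A usage sketch with a hypothetical hypercall number (real numbers are assigned by the host ABI, not by this header):

	#define KVM_HC_EXAMPLE_NOTIFY 1		/* hypothetical, for illustration */

	static inline long example_notify(unsigned long token)
	{
		/* nr goes to R1, token to R2; the return value comes back in R2 */
		return kvm_hypercall1(KVM_HC_EXAMPLE_NOTIFY, token);
	}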
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644
index 000000000000..5c871a990c29
--- /dev/null
+++ b/include/asm-s390/kvm_virtio.h
@@ -0,0 +1,53 @@
+/*
+ * kvm_virtio.h - definition for virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_VIRTIO_H
+#define __KVM_S390_VIRTIO_H
+
+#include <linux/types.h>
+
+struct kvm_device_desc {
+ /* The device type: console, network, disk etc. Type 0 terminates. */
+ __u8 type;
+ /* The number of virtqueues (first in config array) */
+ __u8 num_vq;
+ /*
+ * The number of bytes of feature bits. Multiply by 2: one for host
+ * features and one for guest acknowledgements.
+ */
+ __u8 feature_len;
+ /* The number of bytes of the config array after virtqueues. */
+ __u8 config_len;
+ /* A status byte, written by the Guest. */
+ __u8 status;
+ __u8 config[0];
+};
+
+/*
+ * This is how we expect the device configuration field for a virtqueue
+ * to be laid out in config space.
+ */
+struct kvm_vqconfig {
+ /* The token returned with an interrupt. Set by the guest */
+ __u64 token;
+ /* The address of the virtio ring */
+ __u64 address;
+ /* The number of entries in the virtio_ring */
+ __u16 num;
+
+};
+
+#define KVM_S390_VIRTIO_NOTIFY 0
+#define KVM_S390_VIRTIO_RESET 1
+#define KVM_S390_VIRTIO_SET_STATUS 2
+
+#endif
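The layout implied by the comments above is sequential: each kvm_device_desc is followed by num_vq kvm_vqconfig entries, then 2 * feature_len bytes of feature bits (host features plus guest acknowledgements), then config_len bytes of device config. A sketch of the resulting size computation (a hypothetical helper, not part of this header):

	static inline unsigned int desc_size(const struct kvm_device_desc *d)
	{
		return sizeof(*d)
			+ d->num_vq * sizeof(struct kvm_vqconfig)	/* virtqueues */
			+ d->feature_len * 2				/* host + guest bits */
			+ d->config_len;				/* device config */
	}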
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 5de3efb31445..0bc51d52a899 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -381,27 +381,32 @@ struct _lowcore
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
- __u8 pad13[0x1200-0xe04]; /* 0xe04 */
+ __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ __u64 ext_params2; /* 0x11B8 */
+
+ __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */
/* System info area */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */
- __u8 pad14[0x1318-0x1310]; /* 0x1310 */
+ __u8 pad15[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
- __u8 pad15[0x1324-0x1320]; /* 0x1320 */
+ __u8 pad16[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
- __u8 pad16[0x1340-0x1338]; /* 0x1338 */
+ __u8 pad17[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
/* align to the top of the prefix area */
- __u8 pad17[0x2000-0x1400]; /* 0x1400 */
+ __u8 pad18[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */
} __attribute__((packed)); /* End structure*/
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index 1698e29c5b20..5dd5e7b3476f 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
int noexec;
+ int pgstes;
} mm_context_t;
#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index b5a34c6f91a9..4c2fbf48c9c4 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- mm->context.noexec = s390_noexec;
+ if (current->mm->context.pgstes) {
+ mm->context.noexec = 0;
+ mm->context.pgstes = 1;
+ } else {
+ mm->context.noexec = s390_noexec;
+ mm->context.pgstes = 0;
+ }
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65154dc9a9e5..f8347ce9c5a1 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -30,6 +30,7 @@
*/
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
@@ -219,6 +220,8 @@ extern char empty_zero_page[PAGE_SIZE];
/* Software bits in the page table entry */
#define _PAGE_SWT 0x001 /* SW pte type bit t */
#define _PAGE_SWX 0x002 /* SW pte type bit x */
+#define _PAGE_SPECIAL 0x004 /* SW associated with special page */
+#define __HAVE_ARCH_PTE_SPECIAL
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY 0x400
@@ -258,6 +261,13 @@ extern char empty_zero_page[PAGE_SIZE];
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
+/* Page status table bits for virtualization */
+#define RCP_PCL_BIT 55
+#define RCP_HR_BIT 54
+#define RCP_HC_BIT 53
+#define RCP_GR_BIT 50
+#define RCP_GC_BIT 49
+
#ifndef __s390x__
/* Bits in the segment table address-space-control-element */
@@ -510,9 +520,56 @@ static inline int pte_file(pte_t pte)
return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}
+static inline int pte_special(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_SPECIAL);
+}
+
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
+static inline void rcp_lock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ preempt_disable();
+ while (test_and_set_bit(RCP_PCL_BIT, pgste))
+ ;
+#endif
+}
+
+static inline void rcp_unlock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ clear_bit(RCP_PCL_BIT, pgste);
+ preempt_enable();
+#endif
+}
+
+/* forward declaration for SetPageUptodate in page-flags.h */
+static inline void page_clear_dirty(struct page *page);
+#include <linux/page-flags.h>
+
+static inline void ptep_rcp_copy(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ struct page *page = virt_to_page(pte_val(*ptep));
+ unsigned int skey;
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ skey = page_get_storage_key(page_to_phys(page));
+ if (skey & _PAGE_CHANGED)
+ set_bit_simple(RCP_GC_BIT, pgste);
+ if (skey & _PAGE_REFERENCED)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+ SetPageDirty(page);
+ if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
+ SetPageReferenced(page);
+#endif
+}
+
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
@@ -599,6 +656,8 @@ static inline void pmd_clear(pmd_t *pmd)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ if (mm->context.pgstes)
+ ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -663,10 +722,34 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SPECIAL;
+ return pte;
+}
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_PGSTE
+ unsigned long physpage;
+ int young;
+ unsigned long *pgste;
+
+ if (!vma->vm_mm->context.pgstes)
+ return 0;
+ physpage = pte_val(*ptep) & PAGE_MASK;
+ pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+ rcp_lock(ptep);
+ if (young)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
+ rcp_unlock(ptep);
+ return young;
+#endif
return 0;
}
@@ -674,7 +757,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB; bits are in storage key */
+	/* No need to flush the TLB:
+	 * on s390 the reference bits live in the storage key, never in the TLB.
+	 * With virtualization we handle the reference bit here; without it
+	 * we can simply return. */
+#ifdef CONFIG_PGSTE
+ return ptep_test_and_clear_young(vma, address, ptep);
+#endif
return 0;
}
@@ -693,15 +782,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address));
}
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
+ if (mm->context.pgstes) {
+ rcp_lock(ptep);
+ __ptep_ipte(address, ptep);
+ ptep_rcp_copy(ptep);
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ rcp_unlock(ptep);
+ return;
+ }
__ptep_ipte(address, ptep);
- if (mm->context.noexec)
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ if (mm->context.noexec) {
__ptep_ipte(address, ptep + PTRS_PER_PTE);
+ pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
+ }
}
/*
@@ -966,6 +1065,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);
+extern int s390_enable_sie(void);
/*
* No page table caches to initialise
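rcp_lock()/rcp_unlock() above form a bit spinlock in the page status table entry: spin on test_and_set_bit() until the lock bit is won, then clear_bit() to release. A userspace equivalent using GCC atomic builtins (assumed toolchain; illustrative only):

	static void bit_lock(unsigned long *word, int bit)
	{
		/* spin until the fetch observes the bit clear, i.e. we set it */
		while (__atomic_fetch_or(word, 1UL << bit, __ATOMIC_ACQUIRE)
				& (1UL << bit))
			;
	}

	static void bit_unlock(unsigned long *word, int bit)
	{
		__atomic_fetch_and(word, ~(1UL << bit), __ATOMIC_RELEASE);
	}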
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index a76a6b8fd887..aaf4b518b940 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -62,6 +62,7 @@ extern unsigned long machine_flags;
#define MACHINE_IS_VM (machine_flags & 1)
#define MACHINE_IS_P390 (machine_flags & 4)
#define MACHINE_HAS_MVPG (machine_flags & 16)
+#define MACHINE_IS_KVM (machine_flags & 64)
#define MACHINE_HAS_IDTE (machine_flags & 128)
#define MACHINE_HAS_DIAG9C (machine_flags & 256)
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
new file mode 100644
index 000000000000..02402303d89b
--- /dev/null
+++ b/include/asm-sh/hugetlb.h
@@ -0,0 +1,91 @@
+#ifndef _ASM_SH_HUGETLB_H
+#define _ASM_SH_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len) {
+ return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_SH_HUGETLB_H */
diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h
index 3e3557c53c55..cbc731d35c25 100644
--- a/include/asm-sh/pgtable_32.h
+++ b/include/asm-sh/pgtable_32.h
@@ -326,6 +326,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
+#define pte_special(pte) (0)
#ifdef CONFIG_X2TLB
#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
@@ -356,6 +357,8 @@ PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
/*
* Macro and implementation to make a page protection as uncachable.
*/
diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h
index f9dd9d311441..c78990cda557 100644
--- a/include/asm-sh/pgtable_64.h
+++ b/include/asm-sh/pgtable_64.h
@@ -254,10 +254,11 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
/*
* The following have defined behavior only work if pte_present() is true.
*/
-static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -266,6 +267,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) |
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 2cc235b74d94..d84af6d95f5c 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -219,6 +219,11 @@ static inline int pte_file(pte_t pte)
return pte_val(pte) & BTFIXUP_HALF(pte_filei);
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
/*
*/
BTFIXUPDEF_HALF(pte_wrprotecti)
@@ -251,6 +256,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
+#define pte_mkspecial(pte) (pte)
+
#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h
new file mode 100644
index 000000000000..412af58926a0
--- /dev/null
+++ b/include/asm-sparc64/hugetlb.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_SPARC64_HUGETLB_H
+#define _ASM_SPARC64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
+void hugetlb_prefault_arch_hook(struct mm_struct *mm);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len) {
+ return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index e93a482aa24a..618117def0dc 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -39,8 +39,6 @@
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 549e45266b68..0e200e7acec7 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -506,6 +506,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
return __pte(pte_val(pte) | mask);
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
static inline unsigned long pte_young(pte_t pte)
{
unsigned long mask;
@@ -608,6 +613,11 @@ static inline unsigned long pte_present(pte_t pte)
return val;
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
#define pmd_set(pmdp, ptep) \
(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pud_set(pudp, pmdp) \
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 4102b443e925..02db81b7b86e 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -173,6 +173,11 @@ static inline int pte_newprot(pte_t pte)
return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
/*
* =================================
* Flags setting section.
@@ -241,6 +246,11 @@ static inline pte_t pte_mknewpage(pte_t pte)
return(pte);
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return(pte);
+}
+
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
pte_copy(*pteptr, pteval);
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 9870cc1f2f8f..7154dc4de951 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -30,7 +30,13 @@ extern int geode_get_dev_base(unsigned int dev);
/* MSRS */
-#define GX_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
#define MSR_LBAR_SMB 0x5140000B
#define MSR_LBAR_GPIO 0x5140000C
@@ -45,8 +51,14 @@ extern int geode_get_dev_base(unsigned int dev);
#define MSR_PIC_ZSEL_LOW 0x51400022
#define MSR_PIC_ZSEL_HIGH 0x51400023
-#define MFGPT_IRQ_MSR 0x51400028
-#define MFGPT_NR_MSR 0x51400029
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
/* Resource Sizes */
@@ -93,6 +105,15 @@ extern int geode_get_dev_base(unsigned int dev);
#define PM_AWKD 0x50
#define PM_SSC 0x54
+/* VSA2 magic values */
+
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+
/* GPIO */
#define GPIO_OUTPUT_VAL 0x00
@@ -164,6 +185,17 @@ static inline int is_geode(void)
return (is_geode_gx() || is_geode_lx());
}
+/*
+ * The VSA has virtual registers that we can query for a signature.
+ */
+static inline int geode_has_vsa2(void)
+{
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ return (inw(VSA_VRC_DATA) == VSA_SIG);
+}
+
/* MFGPTs */
#define MFGPT_MAX_TIMERS 8
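geode_has_vsa2() gives drivers a cheap probe for VSA2 firmware before they touch its virtual registers. A hypothetical driver fragment (function name and error path invented for illustration):

	static int example_probe(void)
	{
		if (!geode_has_vsa2())
			return -19;	/* -ENODEV: no VSA2 virtual registers */
		/* safe to query e.g. VSA_VR_MEM_SIZE from here on */
		return 0;
	}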
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
new file mode 100644
index 000000000000..14171a4924f6
--- /dev/null
+++ b/include/asm-x86/hugetlb.h
@@ -0,0 +1,91 @@
+#ifndef _ASM_X86_HUGETLB_H
+#define _ASM_X86_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len) {
+ return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_X86_HUGETLB_H */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120426a3..80eefef2cc76 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
struct kvm_cpuid_entry2 entries[0];
};
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+ __u32 count; /* can be 65536 */
+ __u16 latched_count;
+ __u8 count_latched;
+ __u8 status_latched;
+ __u8 status;
+ __u8 read_state;
+ __u8 write_state;
+ __u8 write_latch;
+ __u8 rw_mode;
+ __u8 mode;
+ __u8 bcd;
+ __u8 gate;
+ __s64 count_load_time;
+};
+
+struct kvm_pit_state {
+ struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+
#endif
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b2844..9d963cd6533c 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
#include <asm/desc.h>
+#define KVM_MAX_VCPUS 16
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
@@ -39,6 +46,13 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
@@ -48,6 +62,7 @@
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
+#define MC_VECTOR 18
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
#define KVM_NR_MEM_OBJS 40
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
unsigned pad_for_nice_hex_output:6;
unsigned metaphysical:1;
unsigned access:3;
+ unsigned invalid:1;
};
};
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
u64 shadow_efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_SIPI_RECEIVED 3
-#define VCPU_MP_STATE_HALTED 4
int mp_state;
int sipi_vector;
u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
u64 *last_pte_updated;
struct {
- gfn_t gfn; /* presumed gfn during guest pte update */
- struct page *page; /* page corresponding to that gfn */
+ gfn_t gfn; /* presumed gfn during guest pte update */
+ pfn_t pfn; /* pfn corresponding to that gfn */
+ int largepage;
} update_pte;
struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
+
+ gpa_t time;
+ struct kvm_vcpu_time_info hv_clock;
+ unsigned int time_offset;
+ struct page *time_page;
};
struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
struct list_head active_mmu_pages;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
+ struct kvm_pit *vpit;
int round_robin_prev_vcpu;
unsigned int tss_addr;
struct page *apic_access_page;
+
+ gpa_t wall_clock;
};
struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
u32 mmu_recycled;
u32 mmu_cache_miss;
u32 remote_tlb_flush;
+ u32 lpages;
};
struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
u32 fpu_reload;
u32 insn_emulation;
u32 insn_emulation_fail;
+ u32 hypercalls;
};
struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
+ int (*get_cpl)(struct kvm_vcpu *vcpu);
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes);
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+ gpa_t addr, unsigned long *ret);
+
+extern bool tdp_enabled;
+
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
unsigned long *rflags);
+void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value);
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+void kvm_enable_tdp(void);
+
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define RMODE_TSS_SIZE \
(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+enum {
+ TASK_SWITCH_CALL = 0,
+ TASK_SWITCH_IRET = 1,
+ TASK_SWITCH_JMP = 2,
+ TASK_SWITCH_GATE = 3,
+};
+
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 0, 0, 0, 0, 0, 0)
+
#endif
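
The KVMTRACE_nD macros above all expand to trace_mark() with the same fixed eight-word format, so every record carries the event id, the vcpu pointer, a payload count, and up to five payload words. A hedged sketch of call sites follows; the handlers and operands are hypothetical, and the trailing argument only selects the marker name (kvm_trace_##name):

static int handle_hlt_traced(struct kvm_vcpu *vcpu)
{
	/* no payload words; "handler" picks the kvm_trace_handler marker */
	KVMTRACE_0D(HLT, vcpu, handler);
	return 1;
}

static void trace_intr(struct kvm_vcpu *vcpu, u32 vector)
{
	/* one payload word: the interrupt vector */
	KVMTRACE_1D(INTR, vcpu, vector, handler);
}
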
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8d8c53..509845942070 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
* paravirtualization, the appropriate feature bit should be checked.
*/
#define KVM_CPUID_FEATURES 0x40000001
+#define KVM_FEATURE_CLOCKSOURCE 0
+#define KVM_FEATURE_NOP_IO_DELAY 1
+#define KVM_FEATURE_MMU_OP 2
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH 32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE 1
+#define KVM_MMU_OP_FLUSH_TLB 2
+#define KVM_MMU_OP_RELEASE_PT 3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+ __u32 op;
+ __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+ struct kvm_mmu_op_header header;
+ __u64 pte_phys;
+ __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+ struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+ struct kvm_mmu_op_header header;
+ __u64 pt_phys;
+};
#ifdef __KERNEL__
#include <asm/processor.h>
+/* Xen binary-compatible interface. See Xen headers for details */
+struct kvm_vcpu_time_info {
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp;
+ uint64_t system_time;
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+ int8_t pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct kvm_wall_clock {
+ uint32_t wc_version;
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+} __attribute__((__packed__));
+
+
+extern void kvmclock_init(void);
+
+
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
*/
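
The two structures above follow the Xen pvclock protocol: the host bumps version to an odd value while it updates the record, and guest time is system_time plus the TSC delta shifted by tsc_shift and scaled by tsc_to_system_mul. A minimal guest-side sketch, assuming rdtsc() and rmb() primitives from the architecture and ignoring the wider intermediate a full implementation would keep:

static u64 kvmclock_read_ns(const volatile struct kvm_vcpu_time_info *ti)
{
	u32 version;
	u64 delta, ns;

	do {
		version = ti->version;	/* odd while the host updates */
		rmb();
		delta = rdtsc() - ti->tsc_timestamp;
		if (ti->tsc_shift >= 0)
			delta <<= ti->tsc_shift;
		else
			delta >>= -ti->tsc_shift;
		/* 64x32->64 scale; can overflow for very large deltas */
		ns = ti->system_time + ((delta * ti->tsc_to_system_mul) >> 32);
		rmb();
	} while ((version & 1) || version != ti->version);

	return ns;
}
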
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index a496d6335d3b..801b31f71452 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -195,6 +195,11 @@ static inline int pte_exec(pte_t pte)
return !(pte_val(pte) & _PAGE_NX);
}
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
static inline int pmd_large(pmd_t pte)
{
return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -256,6 +261,11 @@ static inline pte_t pte_clrglobal(pte_t pte)
return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
extern pteval_t __supported_pte_mask;
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 117343b0c271..2e7974ec77ec 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -722,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b4f84b..e63741f19392 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
extern struct machine_ops machine_ops;
void machine_real_restart(unsigned char *code, int length);
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index c8b024a48b4d..8014d96b21f1 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -210,6 +210,8 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
static inline pte_t pte_wrprotect(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
@@ -222,6 +224,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)
+ { return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1dbe074f1c64..43b406def35f 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -46,6 +46,8 @@
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
@@ -121,6 +123,10 @@ extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
+extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits);
+extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
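
bitmap_onto() scatters the low bits of orig onto the set bits of relmap, and bitmap_fold() wraps bit positions modulo sz. A worked sketch of the kerneldoc semantics (neither helper supports in-place operation, so dst must differ from orig):

static void remap_example(void)
{
	DECLARE_BITMAP(orig, 40);
	DECLARE_BITMAP(onto, 40);
	DECLARE_BITMAP(folded, 40);
	DECLARE_BITMAP(relmap, 40);
	int n;

	bitmap_zero(orig, 40);
	bitmap_zero(relmap, 40);
	for (n = 1; n <= 7; n += 2)	/* orig = {1, 3, 5, 7} */
		__set_bit(n, orig);
	for (n = 30; n <= 39; n++)	/* relmap = {30..39} */
		__set_bit(n, relmap);

	/* bit m of orig selects the m-th set bit of relmap:
	 * onto = {31, 33, 35, 37} */
	bitmap_onto(onto, orig, relmap, 40);

	/* each bit n moves to n % 16: folded = {15, 1, 3, 5} */
	bitmap_fold(folded, onto, 16, 40);
}
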
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e4e340592fb..6a5dbdc8a7dc 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -101,6 +101,8 @@ extern void reserve_bootmem_node(pg_data_t *pgdat,
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
+extern void *alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem_node(pgdat, x) \
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index e8406c55c6d3..cf0303a60611 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -56,19 +56,25 @@ struct sg_io_v4 {
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device {
struct device *class_dev;
- struct device *dev;
+ struct device *parent;
int minor;
struct request_queue *queue;
+ struct kref ref;
+ void (*release)(struct device *);
};
-extern int bsg_register_queue(struct request_queue *, struct device *, const char *);
+extern int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *));
extern void bsg_unregister_queue(struct request_queue *);
#else
-static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name)
+static inline int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *))
{
return 0;
}
-static inline void bsg_unregister_queue(struct request_queue *rq)
+static inline void bsg_unregister_queue(struct request_queue *q)
{
}
#endif
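
The bsg class device is now kref-counted and the caller supplies a release hook, so the parent device can be safely pinned for the lifetime of the bsg node. A hypothetical caller (names are illustrative, not from the tree):

static void my_bsg_release(struct device *parent)
{
	put_device(parent);	/* drop the reference taken in my_attach_bsg() */
}

static int my_attach_bsg(struct request_queue *q, struct device *parent)
{
	get_device(parent);	/* pin @parent until the bsg node is released */
	return bsg_register_queue(q, parent, "my_bsg0", my_bsg_release);
}
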
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4552504c0228..97e24881c4c6 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -60,4 +60,8 @@
#endif
#endif
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7d50ff6d269f..eaab759b1460 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -155,6 +155,7 @@ typedef struct kernel_cap_struct {
* Add any capability from current's capability bounding set
* to the current process' inheritable set
* Allow taking bits out of capability bounding set
+ * Allow modification of the securebits for a process
*/
#define CAP_SETPCAP 8
@@ -490,8 +491,6 @@ extern const kernel_cap_t __cap_init_eff_set;
int capable(int cap);
int __capable(struct task_struct *t, int cap);
-extern long cap_prctl_drop(unsigned long cap);
-
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 259c8051155d..9650806fe2ea 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -14,6 +14,8 @@
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
* For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
* For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
*
* The available cpumask operations are:
*
@@ -53,7 +55,9 @@
* int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
* int cpulist_parse(buf, map) Parse ascii string as cpulist
* int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
+ * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
*
* for_each_cpu_mask(cpu, mask) for-loop cpu over mask
*
@@ -330,6 +334,22 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
+#define cpus_onto(dst, orig, relmap) \
+ __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
+static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
+ const cpumask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define cpus_fold(dst, orig, sz) \
+ __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
+static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 726761e24003..038578362b47 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}
-static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return 1;
}
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 325acdf5c462..2a063b64133f 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -90,6 +90,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
+static inline void dmi_scan_machine(void) { return; }
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
#define dmi_available 0
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 58c57a33e5dd..72295b099228 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -791,6 +791,17 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
+/*
+ * Host and GPU endianness differ.
+ */
+#define FBINFO_FOREIGN_ENDIAN 0x100000
+/*
+ * Big endian math. This is the same flag value as above, but with a
+ * different meaning: it is set by the fb subsystem depending on the
+ * FOREIGN_ENDIAN flag and host endianness. Drivers should not use this flag.
+ */
+#define FBINFO_BE_MATH 0x100000
+
struct fb_info {
int node;
int flags;
@@ -899,15 +910,11 @@ struct fb_info {
#endif
-#if defined (__BIG_ENDIAN)
-#define FB_LEFT_POS(bpp) (32 - bpp)
-#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits))
-#define FB_SHIFT_LOW(val, bits) ((val) << (bits))
-#else
-#define FB_LEFT_POS(bpp) (0)
-#define FB_SHIFT_HIGH(val, bits) ((val) << (bits))
-#define FB_SHIFT_LOW(val, bits) ((val) >> (bits))
-#endif
+#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
+#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
+ (val) << (bits))
+#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \
+ (val) >> (bits))
/*
* `Generic' versions of the frame buffer device operations
@@ -970,6 +977,25 @@ extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
int datasync);
+static inline bool fb_be_math(struct fb_info *info)
+{
+#ifdef CONFIG_FB_FOREIGN_ENDIAN
+#if defined(CONFIG_FB_BOTH_ENDIAN)
+ return info->flags & FBINFO_BE_MATH;
+#elif defined(CONFIG_FB_BIG_ENDIAN)
+ return true;
+#elif defined(CONFIG_FB_LITTLE_ENDIAN)
+ return false;
+#endif /* CONFIG_FB_BOTH_ENDIAN */
+#else
+#ifdef __BIG_ENDIAN
+ return true;
+#else
+ return false;
+#endif /* __BIG_ENDIAN */
+#endif /* CONFIG_FB_FOREIGN_ENDIAN */
+}
+
/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
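
Converting a driver to the reworked shift macros is mechanical: each call gains the fb_info pointer, and fb_be_math() decides the shift direction per framebuffer instead of per host. A sketch with hypothetical variable names:

static u32 pack_pixel(struct fb_info *p, u32 color, u32 bpp, u32 shift)
{
	/* old code: FB_SHIFT_HIGH(color, shift) with the direction
	 * fixed at compile time by __BIG_ENDIAN */
	u32 pos = FB_LEFT_POS(p, bpp);

	return FB_SHIFT_HIGH(p, color, shift + pos);
}

Under CONFIG_FB_BOTH_ENDIAN the choice is made at run time from info->flags & FBINFO_BE_MATH; in every other configuration fb_be_math() folds to a compile-time constant, so the generated code matches the old macros.
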
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d6d7c52055c6..2c925747bc49 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -474,8 +474,8 @@ struct address_space_operations {
int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
- struct page* (*get_xip_page)(struct address_space *, sector_t,
- int);
+ int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct address_space *,
struct page *, struct page *);
@@ -1178,7 +1178,8 @@ struct block_device_operations {
int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
long (*compat_ioctl) (struct file *, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t, unsigned long *);
+ int (*direct_access) (struct block_device *, sector_t,
+ void **, unsigned long *);
int (*media_changed) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9da3c1b..c37653b6843f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
static inline enum zone_type gfp_zone(gfp_t flags)
{
- int base = 0;
-
-#ifdef CONFIG_NUMA
- if (flags & __GFP_THISNODE)
- base = MAX_NR_ZONES;
-#endif
-
#ifdef CONFIG_ZONE_DMA
if (flags & __GFP_DMA)
- return base + ZONE_DMA;
+ return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
if (flags & __GFP_DMA32)
- return base + ZONE_DMA32;
+ return ZONE_DMA32;
#endif
if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
(__GFP_HIGHMEM | __GFP_MOVABLE))
- return base + ZONE_MOVABLE;
+ return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM)
- return base + ZONE_HIGHMEM;
+ return ZONE_HIGHMEM;
#endif
- return base + ZONE_NORMAL;
-}
-
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
- BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
- return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+ return ZONE_NORMAL;
}
/*
@@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
* virtual kernel addresses to the allocated page(s).
*/
+static inline int gfp_zonelist(gfp_t flags)
+{
+ if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+ return 1;
+
+ return 0;
+}
+
/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are two zonelists per node, one for all zones with memory and
+ * one containing just zones from the node the zonelist belongs to.
*
* For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
* optimized to &contig_page_data at compile-time.
*/
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+ return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
+}
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
@@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { }
extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
+extern struct page *
+__alloc_pages_nodemask(gfp_t, unsigned int,
+ struct zonelist *, nodemask_t *nodemask);
+
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
@@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
if (nid < 0)
nid = numa_node_id();
- return __alloc_pages(gfp_mask, order,
- NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
#ifdef CONFIG_NUMA
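
With the per-node zonelist pair, an allocation path reduces to picking the zonelist by node and flags, exactly as the reworked alloc_pages_node() above does. A minimal sketch:

/* Allocate one page on the current node; with __GFP_THISNODE,
 * gfp_zonelist() selects the node-local zonelist (index 1) instead
 * of the full one. */
static struct page *alloc_local_page(gfp_t gfp_mask)
{
	struct zonelist *zl = node_zonelist(numa_node_id(), gfp_mask);

	return __alloc_pages(gfp_mask, 0, zl);
}
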
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index addca4cd4f11..a79e80b689d8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -8,6 +8,7 @@
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+#include <asm/hugetlb.h>
struct ctl_table;
@@ -51,51 +52,6 @@ int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
-#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define is_hugepage_only_range(mm, addr, len) 0
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define hugetlb_free_pgd_range free_pgd_range
-#else
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
- unsigned long end, unsigned long floor,
- unsigned long ceiling);
-#endif
-
-#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-#else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
-#endif
-
-#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
-#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
-#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
-#else
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define hugetlb_prefault_arch_hook(mm) do { } while (0)
-#else
-void hugetlb_prefault_arch_hook(struct mm_struct *mm);
-#endif
-
#else /* !CONFIG_HUGETLB_PAGE */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index e92170dda245..f65e58a1d925 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -613,14 +613,9 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
-static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
-static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
-static inline int i2o_msg_post_wait(struct i2o_controller *,
- struct i2o_message *, unsigned long);
extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
unsigned long, struct i2o_dma *);
-static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* IOP functions */
extern int i2o_status_get(struct i2o_controller *);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index f0af504dfa42..32fd77bb4436 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */
#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
/*
- * Tune flags
- */
-#define IDE_TUNE_NOAUTO 2
-#define IDE_TUNE_AUTO 1
-#define IDE_TUNE_DEFAULT 0
-
-/*
* state flags
*/
@@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */
*/
#define IDE_NR_PORTS (10)
-#define IDE_DATA_OFFSET (0)
-#define IDE_ERROR_OFFSET (1)
-#define IDE_NSECTOR_OFFSET (2)
-#define IDE_SECTOR_OFFSET (3)
-#define IDE_LCYL_OFFSET (4)
-#define IDE_HCYL_OFFSET (5)
-#define IDE_SELECT_OFFSET (6)
-#define IDE_STATUS_OFFSET (7)
-#define IDE_CONTROL_OFFSET (8)
-#define IDE_IRQ_OFFSET (9)
-
-#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
-#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
-#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET
-#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET
-#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET
-#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET
+struct ide_io_ports {
+ unsigned long data_addr;
+
+ union {
+ unsigned long error_addr; /* read: error */
+ unsigned long feature_addr; /* write: feature */
+ };
+
+ unsigned long nsect_addr;
+ unsigned long lbal_addr;
+ unsigned long lbam_addr;
+ unsigned long lbah_addr;
+
+ unsigned long device_addr;
+
+ union {
+ unsigned long status_addr; /* read: status */
+ unsigned long command_addr; /* write: command */
+ };
+
+ unsigned long ctl_addr;
+
+ unsigned long irq_addr;
+};
#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
@@ -163,7 +163,11 @@ typedef u8 hwif_chipset_t;
* Structure to hold all information about the location of this port
*/
typedef struct hw_regs_s {
- unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */
+ union {
+ struct ide_io_ports io_ports;
+ unsigned long io_ports_array[IDE_NR_PORTS];
+ };
+
int irq; /* our irq number */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
hwif_chipset_t chipset;
@@ -179,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
{
unsigned int i;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = io_addr++;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = io_addr++;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr;
+ hw->io_ports.ctl_addr = ctl_addr;
}
#include <asm/ide.h>
@@ -328,7 +332,6 @@ typedef struct ide_drive_s {
unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */
unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
unsigned nodma : 1; /* disallow DMA */
- unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */
unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */
unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
@@ -432,8 +435,8 @@ typedef struct hwif_s {
char name[6]; /* name of interface, eg. "ide0" */
- /* task file registers for pata and sata */
- unsigned long io_ports[IDE_NR_PORTS];
+ struct ide_io_ports io_ports;
+
unsigned long sata_scr[SATA_NR_PORTS];
ide_drive_t drives[MAX_DRIVES]; /* drive info */
@@ -520,7 +523,6 @@ typedef struct hwif_s {
unsigned present : 1; /* this interface exists */
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
- unsigned reset : 1; /* reset after probe */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
unsigned mmio : 1; /* host uses MMIO */
@@ -703,10 +705,6 @@ void ide_add_generic_settings(ide_drive_t *);
read_proc_t proc_ide_read_capacity;
read_proc_t proc_ide_read_geometry;
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *, get_info_t *);
-#endif
-
/*
* Standard exit stuff:
*/
@@ -807,8 +805,14 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
#ifndef _IDE_C
extern ide_hwif_t ide_hwifs[]; /* master data repository */
#endif
+extern int ide_noacpi;
+extern int ide_acpigtf;
+extern int ide_acpionboot;
extern int noautodma;
+extern int ide_vlb_clk;
+extern int ide_pci_clk;
+
ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
static inline ide_hwif_t *ide_find_port(void)
@@ -1068,8 +1072,6 @@ enum {
IDE_HFLAG_NO_DMA = (1 << 14),
/* check if host is PCI IDE device before allowing DMA */
IDE_HFLAG_NO_AUTODMA = (1 << 15),
- /* don't autotune PIO */
- IDE_HFLAG_NO_AUTOTUNE = (1 << 16),
/* host is CS5510/CS5520 */
IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA,
/* no LBA48 */
@@ -1215,13 +1217,15 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif
void ide_remove_port_from_hwgroup(ide_hwif_t *);
-void ide_unregister(unsigned int);
+void ide_unregister(ide_hwif_t *);
void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);
void ide_undecoded_slave(ide_drive_t *);
+void ide_port_apply_params(ide_hwif_t *);
+
int ide_device_add_all(u8 *idx, const struct ide_port_info *);
int ide_device_add(u8 idx[4], const struct ide_port_info *);
int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
@@ -1333,29 +1337,28 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = drive->hwif;
- hwif->OUTB(drive->ctl | (on ? 0 : 2),
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_status(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ return hwif->INB(hwif->io_ports.status_addr);
}
static inline u8 ide_read_altstatus(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]);
+ return hwif->INB(hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_error(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
+ return hwif->INB(hwif->io_ports.error_addr);
}
/*
@@ -1368,7 +1371,7 @@ static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
/* FIXME: use ->atapi_input_bytes */
while (bcount--)
- (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]);
+ (void)hwif->INB(hwif->io_ports.data_addr);
}
static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
@@ -1377,7 +1380,7 @@ static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
/* FIXME: use ->atapi_output_bytes */
while (bcount--)
- hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->OUTB(0, hwif->io_ports.data_addr);
}
#endif /* _IDE_H */
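
Because struct ide_io_ports overlays io_ports_array[] in a union, index-based probe helpers and named-register accessors address the same storage. A sketch using the classic primary-channel port numbers:

static void legacy_ports_example(hw_regs_t *hw)
{
	memset(hw, 0, sizeof(*hw));
	ide_std_init_ports(hw, 0x1f0, 0x3f6);

	/* the loop filled slots 0..7 (0x1f0..0x1f7); the named view
	 * aliases the very same slots */
	WARN_ON(hw->io_ports.data_addr != hw->io_ports_array[0]);
	WARN_ON(hw->io_ports.status_addr != 0x1f7);
	WARN_ON(hw->io_ports.ctl_addr != 0x3f6);
}
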
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 37a6f5bc4a92..bf6b8a61f8db 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -9,6 +9,7 @@
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
+#include <linux/securebits.h>
#include <net/net_namespace.h>
#define INIT_FDTABLE \
@@ -172,7 +173,7 @@ extern struct group_info init_groups;
.cap_inheritable = CAP_INIT_INH_SET, \
.cap_permitted = CAP_FULL_SET, \
.cap_bset = CAP_INIT_BSET, \
- .keep_capabilities = 0, \
+ .securebits = SECUREBITS_DEFAULT, \
.user = INIT_USER, \
.comm = "swapper", \
.thread = INIT_THREAD, \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0f28486f6360..1036631ff4fa 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,13 @@ struct kretprobe_blackpoint {
const char *name;
void *addr;
};
+
+struct kprobe_blackpoint {
+ const char *name;
+ unsigned long start_addr;
+ unsigned long range;
+};
+
extern struct kretprobe_blackpoint kretprobe_blacklist[];
static inline void kretprobe_assert(struct kretprobe_instance *ri,
@@ -227,15 +234,21 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
+int register_kprobes(struct kprobe **kps, int num);
+void unregister_kprobes(struct kprobe **kps, int num);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
int longjmp_break_handler(struct kprobe *, struct pt_regs *);
int register_jprobe(struct jprobe *p);
void unregister_jprobe(struct jprobe *p);
+int register_jprobes(struct jprobe **jps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
+int register_kretprobes(struct kretprobe **rps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
@@ -254,16 +267,30 @@ static inline int register_kprobe(struct kprobe *p)
{
return -ENOSYS;
}
+static inline int register_kprobes(struct kprobe **kps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_kprobe(struct kprobe *p)
{
}
+static inline void unregister_kprobes(struct kprobe **kps, int num)
+{
+}
static inline int register_jprobe(struct jprobe *p)
{
return -ENOSYS;
}
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_jprobe(struct jprobe *p)
{
}
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
static inline void jprobe_return(void)
{
}
@@ -271,9 +298,16 @@ static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
+static inline int register_kretprobes(struct kretprobe **rps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
+static inline void unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
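
The batch variants take an array of probe pointers. A hypothetical module using them (the probed symbols are illustrative; the all-or-nothing rollback on failure is an assumption based on the batch API contract):

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* observe only */
}

static struct kprobe kp_fork = {
	.symbol_name = "do_fork",
	.pre_handler = my_pre,
};
static struct kprobe kp_exit = {
	.symbol_name = "do_exit",
	.pre_handler = my_pre,
};
static struct kprobe *kps[] = { &kp_fork, &kp_exit };

static int __init my_probes_init(void)
{
	/* on failure, already-armed probes are expected to be torn
	 * down by register_kprobes() itself */
	return register_kprobes(kps, ARRAY_SIZE(kps));
}

static void __exit my_probes_exit(void)
{
	unregister_kprobes(kps, ARRAY_SIZE(kps));
}
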
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c1ec04fd000d..a281afeddfbb 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -8,11 +8,18 @@
*/
#include <asm/types.h>
+#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <asm/kvm.h>
#define KVM_API_VERSION 12
+/* for KVM_TRACE_ENABLE */
+struct kvm_user_trace_setup {
+ __u32 buf_size; /* size of each per-cpu sub-buffer */
+ __u32 buf_nr; /* number of sub-buffers per cpu */
+};
+
/* for KVM_CREATE_MEMORY_REGION */
struct kvm_memory_region {
__u32 slot;
@@ -73,6 +80,9 @@ struct kvm_irqchip {
#define KVM_EXIT_INTR 10
#define KVM_EXIT_SET_TPR 11
#define KVM_EXIT_TPR_ACCESS 12
+#define KVM_EXIT_S390_SIEIC 13
+#define KVM_EXIT_S390_RESET 14
+#define KVM_EXIT_DCR 15
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -137,6 +147,27 @@ struct kvm_run {
__u32 is_write;
__u32 pad;
} tpr_access;
+ /* KVM_EXIT_S390_SIEIC */
+ struct {
+ __u8 icptcode;
+ __u64 mask; /* psw upper half */
+ __u64 addr; /* psw lower half */
+ __u16 ipa;
+ __u32 ipb;
+ } s390_sieic;
+ /* KVM_EXIT_S390_RESET */
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+ __u64 s390_reset_flags;
+ /* KVM_EXIT_DCR */
+ struct {
+ __u32 dcrn;
+ __u32 data;
+ __u8 is_write;
+ } dcr;
/* Fix the size of the union. */
char padding[256];
};
@@ -204,6 +235,74 @@ struct kvm_vapic_addr {
__u64 vapic_addr;
};
+/* for KVM_SET_MPSTATE */
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+#define KVM_MP_STATE_SIPI_RECEIVED 4
+
+struct kvm_mp_state {
+ __u32 mp_state;
+};
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+#define KVM_TRC_SHIFT 16
+/*
+ * kvm trace categories
+ */
+#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
+#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
+
+/*
+ * kvm trace action
+ */
+#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
+#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
+#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
+
+#define KVM_TRC_HEAD_SIZE 12
+#define KVM_TRC_CYCLE_SIZE 8
+#define KVM_TRC_EXTRA_MAX 7
+
+/* This structure represents a single trace buffer record. */
+struct kvm_trace_rec {
+ __u32 event:28;
+ __u32 extra_u32:3;
+ __u32 cycle_in:1;
+ __u32 pid;
+ __u32 vcpu_id;
+ union {
+ struct {
+ __u32 cycle_lo, cycle_hi;
+ __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+ } cycle;
+ struct {
+ __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+ } nocycle;
+ } u;
+};
+
#define KVMIO 0xAE
/*
@@ -212,6 +311,8 @@ struct kvm_vapic_addr {
#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
+
+#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
/*
* Check if a kvm extension is available. Argument is extension number,
* return is 1 (yes) or 0 (no, sorry).
@@ -222,7 +323,12 @@ struct kvm_vapic_addr {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-
+/*
+ * ioctls for kvm trace
+ */
+#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
+#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07)
+#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08)
/*
* Extension capability list.
*/
@@ -233,6 +339,13 @@ struct kvm_vapic_addr {
#define KVM_CAP_SET_TSS_ADDR 4
#define KVM_CAP_VAPIC 6
#define KVM_CAP_EXT_CPUID 7
+#define KVM_CAP_CLOCKSOURCE 8
+#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
+#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
+#define KVM_CAP_PIT 11
+#define KVM_CAP_NOP_IO_DELAY 12
+#define KVM_CAP_PV_MMU 13
+#define KVM_CAP_MP_STATE 14
/*
* ioctls for VM fds
@@ -255,6 +368,9 @@ struct kvm_vapic_addr {
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
+#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
+#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
+#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
/*
* ioctls for vcpu fds
@@ -281,5 +397,17 @@ struct kvm_vapic_addr {
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
/* Available with KVM_CAP_VAPIC */
#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
+/* valid for virtual machine (for floating interrupt) _and_ vcpu */
+#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
+/* store status for s390 */
+#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
+#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
+#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
+/* initial ipl psw for s390 */
+#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
+/* initial reset for s390 */
+#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
+#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
+#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
#endif
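
Userspace drives the new trace ioctls against the main /dev/kvm fd. A hedged sketch (the buffer geometry is illustrative, and the records themselves are expected to be read back through the relay files exported under debugfs):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_trace_start(void)
{
	struct kvm_user_trace_setup kuts = {
		.buf_size = 4096,	/* per-cpu sub-buffer size */
		.buf_nr = 8,		/* sub-buffers per cpu */
	};
	int fd = open("/dev/kvm", O_RDWR);

	if (fd < 0 || ioctl(fd, KVM_TRACE_ENABLE, &kuts) < 0)
		return -1;
	return fd;	/* later: KVM_TRACE_PAUSE / KVM_TRACE_DISABLE */
}
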
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 928b0d59e9ba..398978972b7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
+#include <linux/marker.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -24,29 +25,18 @@
#include <asm/kvm_host.h>
-#define KVM_MAX_VCPUS 4
-#define KVM_MEMORY_SLOTS 8
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-
-#define KVM_PIO_PAGE_OFFSET 1
-
/*
* vcpu->requests bit members
*/
#define KVM_REQ_TLB_FLUSH 0
#define KVM_REQ_MIGRATE_TIMER 1
#define KVM_REQ_REPORT_TPR_ACCESS 2
+#define KVM_REQ_MMU_RELOAD 3
+#define KVM_REQ_TRIPLE_FAULT 4
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
-struct kvm_guest_debug {
- int enabled;
- unsigned long bp[4];
- int singlestep;
-};
-
/*
* It would be nice to use something smarter than a linear search, TBD...
* Thankfully we dont expect many devices to register (famous last words :),
@@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
struct kvm_vcpu {
struct kvm *kvm;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier preempt_notifier;
+#endif
int vcpu_id;
struct mutex mutex;
int cpu;
@@ -100,6 +92,10 @@ struct kvm_memory_slot {
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
+ struct {
+ unsigned long rmap_pde;
+ int write_count;
+ } *lpage_info;
unsigned long userspace_addr;
int user_alloc;
};
@@ -114,11 +110,11 @@ struct kvm {
KVM_PRIVATE_MEM_SLOTS];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct list_head vm_list;
- struct file *filp;
struct kvm_io_bus mmio_bus;
struct kvm_io_bus pio_bus;
struct kvm_vm_stat stat;
struct kvm_arch arch;
+ atomic_t users_count;
};
/* The guest did something we don't support. */
@@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module);
void kvm_exit(void);
+void kvm_get_kvm(struct kvm *kvm);
+void kvm_put_kvm(struct kvm *kvm);
+
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;
+extern pfn_t bad_pfn;
int is_error_page(struct page *page);
+int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
@@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
+void kvm_set_page_dirty(struct page *page);
+void kvm_set_page_accessed(struct page *page);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+void kvm_release_pfn_dirty(pfn_t);
+void kvm_release_pfn_clean(pfn_t pfn);
+void kvm_set_pfn_dirty(pfn_t pfn);
+void kvm_set_pfn_accessed(pfn_t pfn);
+void kvm_get_pfn(pfn_t pfn);
+
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
@@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
static inline void kvm_guest_enter(void)
@@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item {
struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
+extern struct dentry *kvm_debugfs_dir;
+
+#ifdef CONFIG_KVM_TRACE
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
+void kvm_trace_cleanup(void);
+#else
+static inline
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+#define kvm_trace_cleanup() ((void)0)
+#endif
#endif
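
The pfn-based accessors generalize the old page-based ones (a pfn need not be backed by a struct page). A sketch of the usual access pattern, with hypothetical surrounding code:

static int touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}
	/* ... map and modify the page here ... */
	kvm_release_pfn_dirty(pfn);	/* mark dirty, then drop the ref */
	return 0;
}
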
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 5497aac0d2f8..3ddce03766ca 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -11,8 +11,11 @@
/* Return values for hypercalls */
#define KVM_ENOSYS 1000
+#define KVM_EFAULT EFAULT
+#define KVM_E2BIG E2BIG
-#define KVM_HC_VAPIC_POLL_IRQ 1
+#define KVM_HC_VAPIC_POLL_IRQ 1
+#define KVM_HC_MMU_OP 2
/*
* hypercalls use architecture specific
@@ -20,6 +23,12 @@
#include <asm/kvm_para.h>
#ifdef __KERNEL__
+#ifdef CONFIG_KVM_GUEST
+void __init kvm_guest_init(void);
+#else
+#define kvm_guest_init() do { } while (0)
+#endif
+
static inline int kvm_para_has_feature(unsigned int feature)
{
if (kvm_arch_para_features() & (1UL << feature))
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c4e46decb22..9b6f395c9625 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -38,6 +38,8 @@ typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;
+typedef hfn_t pfn_t;
+
struct kvm_pio_request {
unsigned long count;
int cur_count;
diff --git a/include/linux/list.h b/include/linux/list.h
index dac16f99c701..b4a939b6b625 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -319,6 +319,15 @@ static inline int list_empty_careful(const struct list_head *head)
return (next == head) && (next == head->prev);
}
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
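
list_is_singular() costs two pointer compares and is useful for fast paths that special-case a single queued element. A minimal sketch:

struct item {
	struct list_head node;
};

/* return the sole element, or NULL if the list is empty or holds
 * more than one entry */
static struct item *only_item(struct list_head *q)
{
	if (!list_is_singular(q))
		return NULL;
	return list_first_entry(q, struct item, node);
}
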
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8fee7a45736b..73e358612eaf 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -8,8 +8,18 @@
struct page;
struct zone;
struct pglist_data;
+struct mem_section;
#ifdef CONFIG_MEMORY_HOTPLUG
+
+/*
+ * Magic numbers for free bootmem.
+ * The normal smallest mapcount is -1; these values are smaller than that.
+ */
+#define SECTION_INFO 0xfffffffe
+#define MIX_INFO 0xfffffffd
+#define NODE_INFO 0xfffffffc
+
/*
* pgdat resizing functions
*/
@@ -64,9 +74,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
/*
- * Walk thorugh all memory which is registered as resource.
+ * Walk through all memory which is registered as a resource.
* arg is (start_pfn, nr_pages, private_arg_pointer)
*/
extern int walk_memory_resource(unsigned long start_pfn,
@@ -142,6 +154,18 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+static inline void put_page_bootmem(struct page *page)
+{
+}
+#else
+extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void put_page_bootmem(struct page *page);
+#endif
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -169,6 +193,10 @@ static inline int mhp_notimplemented(const char *func)
return -ENOSYS;
}
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
#endif /* ! CONFIG_MEMORY_HOTPLUG */
extern int add_memory(int nid, u64 start, u64 size);
@@ -176,5 +204,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
int nr_pages);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
+extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 59c4865bc85f..3a39570b81b8 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,15 +8,32 @@
* Copyright 2003,2004 Andi Kleen SuSE Labs
*/
+/*
+ * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
+ * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
+ * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
+ */
+
/* Policies */
-#define MPOL_DEFAULT 0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND 2
-#define MPOL_INTERLEAVE 3
+enum {
+ MPOL_DEFAULT,
+ MPOL_PREFERRED,
+ MPOL_BIND,
+ MPOL_INTERLEAVE,
+ MPOL_MAX, /* always last member of enum */
+};
-#define MPOL_MAX MPOL_INTERLEAVE
+/* Flags for set_mempolicy */
+#define MPOL_F_STATIC_NODES (1 << 15)
+#define MPOL_F_RELATIVE_NODES (1 << 14)
-/* Flags for get_mem_policy */
+/*
+ * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
+ * either set_mempolicy() or mbind().
+ */
+#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+
+/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
@@ -27,6 +44,14 @@
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
+/*
+ * Internal flags that share the struct mempolicy flags word with
+ * "mode flags". These flags are allocated from bit 0 up, as they
+ * are never OR'ed into the mode in mempolicy API arguments.
+ */
+#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
+#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
+
#ifdef __KERNEL__
#include <linux/mmzone.h>
@@ -35,7 +60,6 @@
#include <linux/spinlock.h>
#include <linux/nodemask.h>
-struct vm_area_struct;
struct mm_struct;
#ifdef CONFIG_NUMA
@@ -54,22 +78,27 @@ struct mm_struct;
* mmap_sem.
*
* Freeing policy:
- * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
- * All other policies don't have any external state. mpol_free() handles this.
+ * Mempolicy objects are reference counted. A mempolicy will be freed when
+ * mpol_put() decrements the reference count to zero.
*
- * Copying policy objects:
- * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
+ * to the new storage. The reference count of the new object is initialized
+ * to 1, representing the caller of mpol_dup().
*/
struct mempolicy {
atomic_t refcnt;
- short policy; /* See MPOL_* above */
+ unsigned short mode; /* See MPOL_* above */
+ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
union {
- struct zonelist *zonelist; /* bind */
short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave */
+ nodemask_t nodes; /* interleave/bind */
/* undefined for default */
} v;
- nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
+ union {
+ nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+ nodemask_t user_nodemask; /* nodemask passed by user */
+ } w;
};
/*
@@ -77,18 +106,43 @@ struct mempolicy {
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
{
if (pol)
- __mpol_free(pol);
+ __mpol_put(pol);
}
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+ return (pol && (pol->flags & MPOL_F_SHARED));
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+ if (mpol_needs_cond_ref(pol))
+ __mpol_put(pol);
+}
+
+extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol);
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol)
+{
+ if (!frompol)
+ return frompol;
+ return __mpol_cond_copy(tompol, frompol);
+}
+
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
if (pol)
- pol = __mpol_copy(pol);
+ pol = __mpol_dup(pol);
return pol;
}
@@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
return 1;
return __mpol_equal(a, b);
}
-#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
-
-/* Could later add inheritance of the process policy here. */
-
-#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
/*
* Tree of shared policies for a shared memory region.
@@ -133,8 +182,7 @@ struct shared_policy {
spinlock_t lock;
};
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
- nodemask_t *nodes);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
struct mempolicy *new);
@@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk,
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
-extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k)
int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+#ifdef CONFIG_TMPFS
+extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context);
+#endif
#else
struct mempolicy {};
@@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return 1;
}
-#define vma_mpol_equal(a,b) 1
-#define mpol_set_vma_default(vma) do {} while(0)
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
-static inline void mpol_free(struct mempolicy *p)
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+ struct mempolicy *from)
{
+ return from;
}
static inline void mpol_get(struct mempolicy *pol)
{
}
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
return NULL;
}
@@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
return -EINVAL;
}
-static inline void mpol_shared_policy_init(struct shared_policy *info,
- int policy, nodemask_t *nodes)
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+ struct mempolicy *mpol)
{
}
@@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
}
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask)
{
- return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+ *mpol = NULL;
+ *nodemask = NULL;
+ return node_zonelist(0, gfp_flags);
}
static inline int do_migrate_pages(struct mm_struct *mm,
@@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm,
static inline void check_highest_zone(int k)
{
}
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
+ int no_context)
+{
+ return 1; /* error */
+}
+
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context)
+{
+ return 0;
+}
+#endif
+
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
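
mpol_parse_str() and mpol_to_str() give tmpfs a two-way conversion between the textual mount-option form of a policy and struct mempolicy. A hedged sketch of a round trip; the "bind:0-3" string and the buffer size are illustrative only, and the parser takes a modifiable string and returns non-zero on error (as the !CONFIG_NUMA stub above mirrors):

	char str[] = "bind:0-3";	/* illustrative policy string */
	char buf[64];
	struct mempolicy *mpol;

	if (mpol_parse_str(str, &mpol, 1))
		return -EINVAL;		/* unparseable policy */

	/* ... hand mpol to mpol_shared_policy_init(sp, mpol) ... */

	if (mpol_to_str(buf, sizeof(buf), mpol, 1) >= 0)
		printk(KERN_DEBUG "parsed back: mpol=%s\n", buf);
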
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ff7df1a2222f..9fa1a8002ce2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -208,6 +208,38 @@ struct mlx4_mtt {
int page_shift;
};
+enum {
+ MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+struct mlx4_ib_user_db_page;
+
+struct mlx4_db {
+ __be32 *db;
+ union {
+ struct mlx4_db_pgdir *pgdir;
+ struct mlx4_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
+struct mlx4_hwq_resources {
+ struct mlx4_db db;
+ struct mlx4_mtt mtt;
+ struct mlx4_buf buf;
+};
+
struct mlx4_mr {
struct mlx4_mtt mtt;
u64 iova;
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct);
+void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
+ int size);
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
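
mlx4_alloc_hwq_res() bundles the buffer, MTT and doorbell allocations that queue setup code would otherwise perform separately, and mlx4_free_hwq_res() tears all three down. A sketch of the expected pairing; the size and max_direct values are purely illustrative:

	struct mlx4_hwq_resources wqres;
	int err;

	err = mlx4_alloc_hwq_res(dev, &wqres, 4096, 2 * PAGE_SIZE);
	if (err)
		return err;

	/* ... wqres.buf holds the queue, wqres.db the doorbell record ... */

	mlx4_free_hwq_res(dev, &wqres, 4096);
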
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index a5e43febee4f..7f128b266faa 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context);
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
+
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 286d31521605..8b7f4a5d4f6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -164,8 +165,6 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- struct page *(*nopage)(struct vm_area_struct *area,
- unsigned long address, int *type);
unsigned long (*nopfn)(struct vm_area_struct *area,
unsigned long address);
@@ -173,7 +172,25 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
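
The reference rules above describe what the shmem implementation does; a hypothetical get_policy() honoring them takes a reference only on a shared-policy hit and otherwise returns NULL rather than falling back to any default. struct my_obj and its policy field are invented for the sketch:

static struct mempolicy *my_get_policy(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct my_obj *obj = vma->vm_private_data;	/* hypothetical */
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	/* mpol_shared_policy_lookup() adds the required reference ... */
	return mpol_shared_policy_lookup(&obj->policy, idx);
	/* ... and a miss yields NULL: no fallback to task/system policy */
}
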
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
* we have run out of space and have to fall back to an
* alternate (slower) way of determining the node.
*
- * No sparsemem: | NODE | ZONE | ... | FLAGS |
- * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
- * no space for node: | SECTION | ZONE | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
*/
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH 0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONES_WIDTH ZONES_SHIFT
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
#define NODES_WIDTH 0
#endif
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
+#endif
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else
+#endif
+ if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
@@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page)
}
/*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS (NULL)
-#define NOPAGE_OOM ((struct page *) (-1))
-
-/*
* Error return values for the *_nopfn functions
*/
#define NOPFN_SIGBUS ((unsigned long) -1)
@@ -720,7 +739,9 @@ struct zap_details {
unsigned long truncate_count; /* Compare vm_truncate_count */
};
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1149,6 +1170,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index af190ceab971..29adaa781cb6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -172,6 +172,7 @@ struct mm_struct {
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
int map_count; /* number of VMAs */
+ int core_waiters;
struct rw_semaphore mmap_sem;
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -216,11 +217,10 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access the bits */
/* coredumping support */
- int core_waiters;
struct completion *core_startup_done, core_done;
/* aio bits */
- rwlock_t ioctx_list_lock;
+ rwlock_t ioctx_list_lock; /* aio lock */
struct kioctx *ioctx_list;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
struct mem_cgroup *mem_cgroup;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f274a687c7e..aad98003176f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#ifndef __GENERATING_BOUNDS_H
#include <linux/spinlock.h>
#include <linux/list.h>
@@ -15,6 +16,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
+#include <linux/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -129,6 +131,8 @@ struct per_cpu_pageset {
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
+#endif /* !__GENERATING_BOUNDS_H */
+
enum zone_type {
#ifdef CONFIG_ZONE_DMA
/*
@@ -177,9 +181,11 @@ enum zone_type {
ZONE_HIGHMEM,
#endif
ZONE_MOVABLE,
- MAX_NR_ZONES
+ __MAX_NR_ZONES
};
+#ifndef __GENERATING_BOUNDS_H
+
/*
* When a memory allocation must conform to specific limitations (such
* as being suitable for DMA) the caller will pass in hints to the
@@ -188,28 +194,15 @@ enum zone_type {
* match the requested limits. See gfp_zone() in include/linux/gfp.h
*/
-/*
- * Count the active zones. Note that the use of defined(X) outside
- * #if and family is not necessarily defined so ensure we cannot use
- * it later. Use __ZONE_COUNT to work out how many shift bits we need.
- */
-#define __ZONE_COUNT ( \
- defined(CONFIG_ZONE_DMA) \
- + defined(CONFIG_ZONE_DMA32) \
- + 1 \
- + defined(CONFIG_HIGHMEM) \
- + 1 \
-)
-#if __ZONE_COUNT < 2
+#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
-#elif __ZONE_COUNT <= 2
+#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
-#elif __ZONE_COUNT <= 4
+#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
-#undef __ZONE_COUNT
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -393,10 +386,10 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 * The NUMA zonelists are doubled because we need zonelists that restrict the
* allocations to a single node for GFP_THISNODE.
*
- * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback
- * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE)
+ * [0] : Zonelist with fallback
+ * [1] : No fallback (GFP_THISNODE)
*/
-#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+#define MAX_ZONELISTS 2
/*
@@ -464,11 +457,20 @@ struct zonelist_cache {
unsigned long last_full_zap; /* when last zap'd (jiffies) */
};
#else
-#define MAX_ZONELISTS MAX_NR_ZONES
+#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif
/*
+ * This struct contains information about a zone in a zonelist. It is stored
+ * here to avoid dereferences into large structures and lookups of tables
+ */
+struct zoneref {
+ struct zone *zone; /* Pointer to actual zone */
+ int zone_idx; /* zone_idx(zoneref->zone) */
+};
+
+/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
@@ -476,34 +478,23 @@ struct zonelist_cache;
*
* If zlcache_ptr is not NULL, then it is just the address of zlcache,
* as explained above. If zlcache_ptr is NULL, there is no zlcache.
+ *
+ * To speed the reading of the zonelist, the zonerefs contain the zone index
+ * of the entry being read. Helper functions to access information given
+ * a struct zoneref are
+ *
+ * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
+ * zonelist_zone_idx() - Return the index of the zone for an entry
+ * zonelist_node_idx() - Return the index of the node for an entry
*/
-
struct zonelist {
struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
- struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited
+ struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
struct zonelist_cache zlcache; // optional ...
#endif
};
-#ifdef CONFIG_NUMA
-/*
- * Only custom zonelists like MPOL_BIND need to be filtered as part of
- * policies. As described in the comment for struct zonelist_cache, these
- * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
- * that to determine if the zonelists needs to be filtered or not.
- */
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
- return !zonelist->zlcache_ptr;
-}
-#else
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
- return 0;
-}
-#endif /* CONFIG_NUMA */
-
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
unsigned long start_pfn;
@@ -637,9 +628,10 @@ static inline int is_normal_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
- int zone_idx = zone - zone->zone_pgdat->node_zones;
- return zone_idx == ZONE_HIGHMEM ||
- (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
+ int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
+ return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
+ (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
+ zone_movable_is_highmem());
#else
return 0;
#endif
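
The byte-offset form of is_highmem() is an optimization, not a behavior change: for any index i, (char *)&node_zones[i] - (char *)node_zones equals i * sizeof(struct zone), so the offset compare tests exactly what the old index compare did while avoiding a runtime division by sizeof(struct zone), which is not a power of two. In sketch form:

	/* old: pointer subtraction implies a divide by sizeof(struct zone) */
	int idx = zone - zone->zone_pgdat->node_zones;

	/* new: byte difference against a compile-time constant, no divide */
	long off = (char *)zone - (char *)zone->zone_pgdat->node_zones;

	/* idx == ZONE_HIGHMEM  <=>  off == ZONE_HIGHMEM * sizeof(*zone) */
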
@@ -730,32 +722,103 @@ extern struct zone *next_zone(struct zone *zone);
zone; \
zone = next_zone(zone))
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
+static inline struct zone *zonelist_zone(struct zoneref *zoneref)
+{
+ return zoneref->zone;
+}
-#if BITS_PER_LONG == 32
-/*
- * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
- * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
+static inline int zonelist_zone_idx(struct zoneref *zoneref)
+{
+ return zoneref->zone_idx;
+}
+
+static inline int zonelist_node_idx(struct zoneref *zoneref)
+{
+#ifdef CONFIG_NUMA
+ /* zone_to_nid not available in this context */
+ return zoneref->zone->node;
+#else
+ return 0;
+#endif /* CONFIG_NUMA */
+}
+
+/**
+ * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
+ * @z - The cursor used as a starting point for the search
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the next zone at or below a given zone index that is
+ * within the allowed nodemask using a cursor as the starting point for the
+ * search. The zoneref returned is a cursor that is used as the next starting
+ * point for future calls to next_zones_zonelist().
*/
-#define FLAGS_RESERVED 9
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone);
-#elif BITS_PER_LONG == 64
-/*
- * with 64 bit flags field, there's plenty of room.
+/**
+ * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
+ * @zonelist - The zonelist to search for a suitable zone
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the first zone at or below a given zone index that is
+ * within the allowed nodemask. The zoneref returned is a cursor that can be
+ * used to iterate the zonelist with next_zones_zonelist. The cursor is only
+ * meant for iteration: it does not point at the zone returned via @zone, so
+ * the caller should not inspect it directly.
*/
-#define FLAGS_RESERVED 32
+static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone)
+{
+ return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
+ zone);
+}
-#else
+/**
+ * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ * @nodemask - Nodemask allowed by the allocator
+ *
+ * This iterator iterates through all zones at or below a given zone index and
+ * within a given nodemask
+ */
+#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+ for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+ zone; \
+ z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
-#error BITS_PER_LONG not defined
+/**
+ * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ *
+ * This iterator iterates through all zones at or below a given zone index.
+ */
+#define for_each_zone_zonelist(zone, z, zlist, highidx) \
+ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
-#define early_pfn_to_nid(nid) (0UL)
+static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_FLATMEM
@@ -833,6 +896,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
+extern unsigned long usemap_size(void);
/*
* We use the lower bits of the mem_map pointer to store
@@ -938,6 +1002,7 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#define pfn_valid_within(pfn) (1)
#endif
+#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
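
With zonelists now arrays of struct zoneref, callers iterate through the cursor helpers instead of walking the old NULL-terminated zone array. A minimal sketch, assuming a zonelist obtained via node_zonelist() as in the !CONFIG_NUMA huge_zonelist() stub earlier in this patch; passing a NULL nodemask gives an unfiltered walk:

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
	enum zone_type highidx = gfp_zone(GFP_KERNEL);
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, NULL) {
		/* zone visits every candidate at or below highidx */
		pr_debug("candidate zone: %s\n", zone->name);
	}
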
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index f950921523f5..b03b27457413 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -58,7 +58,11 @@
#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */
/* media of boot sector */
-#define FAT_VALID_MEDIA(x) ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0)
+static inline int fat_valid_media(u8 media)
+{
+ return 0xf8 <= media || media == 0xf0;
+}
+
#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \
MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x))
@@ -195,6 +199,7 @@ struct fat_mount_options {
char *iocharset; /* Charset used for filename input/display */
unsigned short shortname; /* flags for shortname display/create rule */
unsigned char name_check; /* r = relaxed, n = normal, s = strict */
+ unsigned short allow_utime;/* permission for setting the [am]time */
unsigned quiet:1, /* set = fake successful chmods and chowns */
showexec:1, /* set = only set x bit for com/exe/bat */
sys_immutable:1, /* set = system files are immutable */
@@ -232,6 +237,7 @@ struct msdos_sb_info {
struct mutex fat_lock;
unsigned int prev_free; /* previously allocated cluster number */
unsigned int free_clusters; /* -1 if undefined */
+ unsigned int free_clus_valid; /* is free_clusters valid? */
struct fat_mount_options options;
struct nls_table *nls_disk; /* Codepage used on disk */
struct nls_table *nls_io; /* Charset used for input and display */
@@ -401,7 +407,7 @@ extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
-extern int fat_notify_change(struct dentry * dentry, struct iattr * attr);
+extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
extern void fat_truncate(struct inode *inode);
extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 88766e43e121..9f2d76347f19 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,6 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
/* linux/fs/ncpfs/dir.c */
extern const struct inode_operations ncp_dir_inode_operations;
extern const struct file_operations ncp_dir_operations;
+extern struct dentry_operations ncp_root_dentry_operations;
int ncp_conn_logged_in(struct super_block *);
int ncp_date_dos2unix(__le16 time, __le16 date);
void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
@@ -223,6 +224,12 @@ int ncp_disconnect(struct ncp_server *server);
void ncp_lock_server(struct ncp_server *server);
void ncp_unlock_server(struct ncp_server *server);
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
/* linux/fs/ncpfs/file.c */
extern const struct inode_operations ncp_file_inode_operations;
extern const struct file_operations ncp_file_operations;
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 905e18f4b412..848025cd7087 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -14,6 +14,8 @@
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
* For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
* For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
*
* The available nodemask operations are:
*
@@ -55,7 +57,9 @@
* int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
* int nodelist_parse(buf, map) Parse ascii string as nodelist
* int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * int nodes_remap(dst, src, old, new) *dst = map(old, new)(dst)
+ * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap
+ * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz
*
* for_each_node_mask(node, mask) for-loop node over mask
*
@@ -326,6 +330,22 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
+#define nodes_onto(dst, orig, relmap) \
+ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
+static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+ const nodemask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define nodes_fold(dst, orig, sz) \
+ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
+static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
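
nodes_onto() and nodes_fold() wrap the new bitmap_onto()/bitmap_fold() primitives that the relative mempolicy rework relies on. A worked toy example, assuming MAX_NUMNODES covers the nodes named:

	nodemask_t orig, relmap, dst;

	nodes_clear(orig);
	nodes_clear(relmap);
	node_set(0, orig);		/* orig   = { 0, 1 } */
	node_set(1, orig);
	node_set(4, relmap);		/* relmap = { 4, 6 } */
	node_set(6, relmap);

	/* the n-th set bit of orig maps to the n-th node of relmap */
	nodes_onto(dst, orig, relmap);	/* dst = { 4, 6 } */

	/* bit positions are taken modulo sz */
	nodes_fold(dst, relmap, 4);	/* dst = { 0, 2 } */
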
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask) \
for ((node) = first_node(mask); \
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index f4df40038f0c..20dfed590183 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -247,6 +247,7 @@ extern struct blocking_notifier_head reboot_notifier_list;
#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */
#define VT_WRITE 0x0003 /* A char got output */
#define VT_UPDATE 0x0004 /* A bigger update occurred */
+#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3852436b652a..a7979baf1e39 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -23,8 +23,8 @@ enum oom_constraint {
CONSTRAINT_MEMORY_POLICY,
};
-extern int try_set_zone_oom(struct zonelist *zonelist);
-extern void clear_zonelist_oom(struct zonelist *zonelist);
+extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
extern int register_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b5b30f1c1e59..590cff32415d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -6,7 +6,10 @@
#define PAGE_FLAGS_H
#include <linux/types.h>
+#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
+#include <linux/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
/*
* Various page->flags bits:
@@ -59,77 +62,138 @@
* extends from the high bits downwards.
*
* | FIELD | ... | FLAGS |
- * N-1 ^ 0
- * (N-FLAGS_RESERVED)
+ * N-1 ^ 0
+ * (NR_PAGEFLAGS)
*
- * The fields area is reserved for fields mapping zone, node and SPARSEMEM
- * section. The boundry between these two areas is defined by
- * FLAGS_RESERVED which defines the width of the fields section
- * (see linux/mmzone.h). New flags must _not_ overlap with this area.
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
*/
-#define PG_locked 0 /* Page is locked. Don't touch. */
-#define PG_error 1
-#define PG_referenced 2
-#define PG_uptodate 3
+enum pageflags {
+ PG_locked, /* Page is locked. Don't touch. */
+ PG_error,
+ PG_referenced,
+ PG_uptodate,
+ PG_dirty,
+ PG_lru,
+ PG_active,
+ PG_slab,
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_arch_1,
+ PG_reserved,
+ PG_private, /* If pagecache, has fs-private data */
+ PG_writeback, /* Page is under writeback */
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+ PG_head, /* A head page */
+ PG_tail, /* A tail page */
+#else
+ PG_compound, /* A compound page */
+#endif
+ PG_swapcache, /* Swap page: swp_entry_t in private */
+ PG_mappedtodisk, /* Has blocks allocated on-disk */
+ PG_reclaim, /* To be reclaimed asap */
+ PG_buddy, /* Page is free, on buddy lists */
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+ PG_uncached, /* Page has been mapped as uncached */
+#endif
+ __NR_PAGEFLAGS
+};
+
+#ifndef __GENERATING_BOUNDS_H
+
+/*
+ * Macros to create function definitions for page flags
+ */
+#define TESTPAGEFLAG(uname, lname) \
+static inline int Page##uname(struct page *page) \
+ { return test_bit(PG_##lname, &page->flags); }
-#define PG_dirty 4
-#define PG_lru 5
-#define PG_active 6
-#define PG_slab 7 /* slab debug (Suparna wants this) */
+#define SETPAGEFLAG(uname, lname) \
+static inline void SetPage##uname(struct page *page) \
+ { set_bit(PG_##lname, &page->flags); }
-#define PG_owner_priv_1 8 /* Owner use. If pagecache, fs may use*/
-#define PG_arch_1 9
-#define PG_reserved 10
-#define PG_private 11 /* If pagecache, has fs-private data */
+#define CLEARPAGEFLAG(uname, lname) \
+static inline void ClearPage##uname(struct page *page) \
+ { clear_bit(PG_##lname, &page->flags); }
-#define PG_writeback 12 /* Page is under writeback */
-#define PG_compound 14 /* Part of a compound page */
-#define PG_swapcache 15 /* Swap page: swp_entry_t in private */
+#define __SETPAGEFLAG(uname, lname) \
+static inline void __SetPage##uname(struct page *page) \
+ { __set_bit(PG_##lname, &page->flags); }
-#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
-#define PG_reclaim 17 /* To be reclaimed asap */
-#define PG_buddy 19 /* Page is free, on buddy lists */
+#define __CLEARPAGEFLAG(uname, lname) \
+static inline void __ClearPage##uname(struct page *page) \
+ { __clear_bit(PG_##lname, &page->flags); }
+
+#define TESTSETFLAG(uname, lname) \
+static inline int TestSetPage##uname(struct page *page) \
+ { return test_and_set_bit(PG_##lname, &page->flags); }
+
+#define TESTCLEARFLAG(uname, lname) \
+static inline int TestClearPage##uname(struct page *page) \
+ { return test_and_clear_bit(PG_##lname, &page->flags); }
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
-#define PG_readahead PG_reclaim /* Reminder to do async read-ahead */
-/* PG_owner_priv_1 users should have descriptive aliases */
-#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
-#define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */
+#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
+
+#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
+
+#define PAGEFLAG_FALSE(uname) \
+static inline int Page##uname(struct page *page) \
+ { return 0; }
+
+#define TESTSCFLAG(uname, lname) \
+ TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+
+struct page; /* forward declaration */
+
+PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+PAGEFLAG(Error, error)
+PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+__PAGEFLAG(Slab, slab)
+PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */
+PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
+PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
+ __SETPAGEFLAG(Private, private)
-#if (BITS_PER_LONG > 32)
/*
- * 64-bit-only flags build down from bit 31
- *
- * 32 bit -------------------------------| FIELDS | FLAGS |
- * 64 bit | FIELDS | ?????? FLAGS |
- * 63 32 0
+ * Only test-and-set exist for PG_writeback. The unconditional operators are
+ * risky: they bypass page accounting.
*/
-#define PG_uncached 31 /* Page has been mapped as uncached */
-#endif
+TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+__PAGEFLAG(Buddy, buddy)
+PAGEFLAG(MappedToDisk, mappedtodisk)
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+
+#ifdef CONFIG_HIGHMEM
/*
- * Manipulation of page state flags
+ * Must use a macro here due to header dependency issues. page_zone() is not
+ * available at this point.
*/
-#define PageLocked(page) \
- test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page) \
- set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page) \
- test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page) \
- clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page) \
- test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page) test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+#define PageHighMem(__p) is_highmem(page_zone(__p))
+#else
+PAGEFLAG_FALSE(HighMem)
+#endif
+
+#ifdef CONFIG_SWAP
+PAGEFLAG(SwapCache, swapcache)
+#else
+PAGEFLAG_FALSE(SwapCache)
+#endif
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+PAGEFLAG(Uncached, uncached)
+#else
+PAGEFLAG_FALSE(Uncached)
+#endif
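
These generator macros replace the long runs of hand-written #defines deleted further down. As a concrete instance, the single line PAGEFLAG(Dirty, dirty) above expands (modulo whitespace) to:

	static inline int PageDirty(struct page *page)
		{ return test_bit(PG_dirty, &page->flags); }
	static inline void SetPageDirty(struct page *page)
		{ set_bit(PG_dirty, &page->flags); }
	static inline void ClearPageDirty(struct page *page)
		{ clear_bit(PG_dirty, &page->flags); }

and the trailing TESTSCFLAG(Dirty, dirty) adds TestSetPageDirty() and TestClearPageDirty() on top.
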
static inline int PageUptodate(struct page *page)
{
@@ -177,97 +241,59 @@ static inline void SetPageUptodate(struct page *page)
#endif
}
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-
-#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
-#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-
-#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
-#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
-#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
-
-#define PageActive(page) test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
-
-#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
-#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page) is_highmem(page_zone(page))
-#else
-#define PageHighMem(page) 0 /* needed to optimize away at compile time */
-#endif
+CLEARPAGEFLAG(Uptodate, uptodate)
-#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PagePinned(page) test_bit(PG_pinned, &(page)->flags)
-#define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags)
-#define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags)
+extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
-#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags)
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
-#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
-#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags)
-#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
+static inline void set_page_writeback(struct page *page)
+{
+ test_set_page_writeback(page);
+}
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
- * Only test-and-set exist for PG_writeback. The unconditional operators are
- * risky: they bypass page accounting.
+ * System with lots of page flags available. This allows separate
+ * flags for PageHead() and PageTail() checks of compound pages so that bit
+ * tests can be used in performance sensitive paths. PageCompound is
+ * generally not used in hot code paths.
*/
-#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \
- &(page)->flags)
-#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
- &(page)->flags)
+__PAGEFLAG(Head, head)
+__PAGEFLAG(Tail, tail)
-#define PageBuddy(page) test_bit(PG_buddy, &(page)->flags)
-#define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags)
-#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags)
-#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags)
-#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
-
-#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+static inline int PageCompound(struct page *page)
+{
+ return page->flags & ((1L << PG_head) | (1L << PG_tail));
-#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
-#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
-#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
+}
+#else
+/*
+ * Reduce page flag use as much as possible by overlapping
+ * compound page flags with the flags used for page cache pages. Possible
+ * because PageCompound is always set for compound pages and not for
+ * pages on the LRU and/or pagecache.
+ */
+TESTPAGEFLAG(Compound, compound)
+__PAGEFLAG(Head, compound)
/*
* PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page
+ * head and tail of a compound page. This saves one page flag
+ * but makes it impossible to use compound pages for the page cache.
+ * The PG_reclaim bit would have to be used for reclaim or readahead
+ * if compound pages enter the page cache.
*
* PG_compound & PG_reclaim => Tail page
* PG_compound & ~PG_reclaim => Head page
*/
-
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
-#define PageTail(page) (((page)->flags & PG_head_tail_mask) \
- == PG_head_tail_mask)
+static inline int PageTail(struct page *page)
+{
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+}
static inline void __SetPageTail(struct page *page)
{
@@ -279,33 +305,6 @@ static inline void __ClearPageTail(struct page *page)
page->flags &= ~PG_head_tail_mask;
}
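
In this non-extended case the two overlapping bits encode three states, which is what the mask tests above implement:

	PG_compound	PG_reclaim	meaning
	    0		    0		ordinary page
	    1		    0		head of a compound page
	    1		    1		tail of a compound page

PG_reclaim on its own keeps its normal reclaim/readahead meaning, which is exactly why compound pages cannot enter the page cache under this encoding.
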
-#define PageHead(page) (((page)->flags & PG_head_tail_mask) \
- == (1L << PG_compound))
-#define __SetPageHead(page) __SetPageCompound(page)
-#define __ClearPageHead(page) __ClearPageCompound(page)
-
-#ifdef CONFIG_SWAP
-#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
-#else
-#define PageSwapCache(page) 0
-#endif
-
-#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
-#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
-#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
-
-struct page; /* forward declaration */
-
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
-
-static inline void set_page_writeback(struct page *page)
-{
- test_set_page_writeback(page);
-}
-
+#endif /* !PAGEFLAGS_EXTENDED */
+#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5c80b1939636..5ad79198d6f9 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -16,7 +16,8 @@
# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
-/* Get/set whether or not to drop capabilities on setuid() away from uid 0 */
+/* Get/set whether or not to drop capabilities on setuid() away from
+ * uid 0 (as per security/commoncap.c) */
#define PR_GET_KEEPCAPS 7
#define PR_SET_KEEPCAPS 8
@@ -63,7 +64,7 @@
#define PR_GET_SECCOMP 21
#define PR_SET_SECCOMP 22
-/* Get/set the capability bounding set */
+/* Get/set the capability bounding set (as per security/commoncap.c) */
#define PR_CAPBSET_READ 23
#define PR_CAPBSET_DROP 24
@@ -73,4 +74,8 @@
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
+/* Get/set securebits (as per security/commoncap.c) */
+#define PR_GET_SECUREBITS 27
+#define PR_SET_SECUREBITS 28
+
#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index eb560d031acd..52e49dce6584 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -202,10 +202,14 @@ struct quota_format_type;
struct mem_dqinfo {
struct quota_format_type *dqi_format;
+ int dqi_fmt_id; /* Id of the dqi_format - used when turning
+ * quotas on after remount RW */
struct list_head dqi_dirty_list; /* List of dirty dquots */
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
+ qsize_t dqi_maxblimit;
+ qsize_t dqi_maxilimit;
union {
struct v1_mem_dqinfo v1_i;
struct v2_mem_dqinfo v2_i;
@@ -296,8 +300,8 @@ struct dquot_operations {
/* Operations handling requests from userspace */
struct quotactl_ops {
- int (*quota_on)(struct super_block *, int, int, char *);
- int (*quota_off)(struct super_block *, int);
+ int (*quota_on)(struct super_block *, int, int, char *, int);
+ int (*quota_off)(struct super_block *, int, int);
int (*quota_sync)(struct super_block *, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
@@ -318,6 +322,10 @@ struct quota_format_type {
#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */
#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */
+#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but
+ * we have necessary info in
+ * memory to turn them on */
+#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
@@ -329,17 +337,16 @@ struct quota_info {
struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-/* Inline would be better but we need to dereference super_block which is not defined yet */
-int mark_dquot_dirty(struct dquot *dquot);
-
-#define dquot_dirty(dquot) test_bit(DQ_MOD_B, &(dquot)->dq_flags)
-
#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \
(sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED))
#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \
sb_has_quota_enabled(sb, GRPQUOTA))
+#define sb_has_quota_suspended(sb, type) \
+ ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
+ (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
+
int register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a4159..f86702053853 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,11 +37,11 @@ extern int dquot_release(struct dquot *dquot);
extern int dquot_commit_info(struct super_block *sb, int type);
extern int dquot_mark_dquot_dirty(struct dquot *dquot);
-extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
+extern int vfs_quota_on(struct super_block *sb, int type, int format_id,
+ char *path, int remount);
extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
-extern int vfs_quota_off(struct super_block *sb, int type);
-#define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type)
+extern int vfs_quota_off(struct super_block *sb, int type, int remount);
extern int vfs_quota_sync(struct super_block *sb, int type);
extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
@@ -59,7 +59,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
/* It is better to call this function outside of any transaction as it might
* need a lot of space in journal for dquot structure allocation. */
-static __inline__ void DQUOT_INIT(struct inode *inode)
+static inline void DQUOT_INIT(struct inode *inode)
{
BUG_ON(!inode->i_sb);
if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
@@ -67,7 +67,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
}
/* The same as with DQUOT_INIT */
-static __inline__ void DQUOT_DROP(struct inode *inode)
+static inline void DQUOT_DROP(struct inode *inode)
{
/* Here we can get arbitrary inode from clear_inode() so we have
* to be careful. OTOH we don't need locking as quota operations
@@ -90,7 +90,7 @@ static __inline__ void DQUOT_DROP(struct inode *inode)
/* The following allocation/freeing/transfer functions *must* be called inside
* a transaction (deadlocks possible otherwise) */
-static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb)) {
/* Used space is updated in alloc_space() */
@@ -102,7 +102,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t
return 0;
}
-static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr)))
@@ -110,7 +110,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
return ret;
}
-static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb)) {
/* Used space is updated in alloc_space() */
@@ -122,7 +122,7 @@ static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
return 0;
}
-static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr)))
@@ -130,7 +130,7 @@ static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
return ret;
}
-static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
{
if (sb_any_quota_enabled(inode->i_sb)) {
DQUOT_INIT(inode);
@@ -140,7 +140,7 @@ static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
return 0;
}
-static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb))
inode->i_sb->dq_op->free_space(inode, nr);
@@ -148,19 +148,19 @@ static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
inode_sub_bytes(inode, nr);
}
-static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
{
DQUOT_FREE_SPACE_NODIRTY(inode, nr);
mark_inode_dirty(inode);
}
-static __inline__ void DQUOT_FREE_INODE(struct inode *inode)
+static inline void DQUOT_FREE_INODE(struct inode *inode)
{
if (sb_any_quota_enabled(inode->i_sb))
inode->i_sb->dq_op->free_inode(inode, 1);
}
-static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
{
if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
DQUOT_INIT(inode);
@@ -171,14 +171,32 @@ static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
}
/* The following two functions cannot be called inside a transaction */
-#define DQUOT_SYNC(sb) sync_dquots(sb, -1)
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+ sync_dquots(sb, -1);
+}
-static __inline__ int DQUOT_OFF(struct super_block *sb)
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
{
int ret = -ENOSYS;
- if (sb_any_quota_enabled(sb) && sb->s_qcop && sb->s_qcop->quota_off)
- ret = sb->s_qcop->quota_off(sb, -1);
+ if (sb->s_qcop && sb->s_qcop->quota_off)
+ ret = sb->s_qcop->quota_off(sb, -1, remount);
+ return ret;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+ int cnt;
+ int ret = 0, err;
+
+ if (!sb->s_qcop || !sb->s_qcop->quota_on)
+ return -ENOSYS;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
+ if (err < 0 && !ret)
+ ret = err;
+ }
return ret;
}
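
The new remount argument lets the VFS suspend quotas when a filesystem is remounted read-only and revive them on the way back to read-write; DQUOT_ON_REMOUNT() can pass a NULL path and format 0 because the suspended state (dqi_fmt_id and friends) is still held in memory. A sketch of how a do_remount_sb()-style caller might sequence the pair; do_the_remount() is a hypothetical stand-in:

	int was_rdonly = sb->s_flags & MS_RDONLY;
	int retval;

	if ((flags & MS_RDONLY) && !was_rdonly)
		DQUOT_OFF(sb, 1);	/* suspend, keep state in memory */

	retval = do_the_remount(sb, flags);	/* hypothetical helper */

	if (!retval && was_rdonly && !(flags & MS_RDONLY))
		DQUOT_ON_REMOUNT(sb);	/* turn suspended quotas back on */
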
@@ -189,13 +207,43 @@ static __inline__ int DQUOT_OFF(struct super_block *sb)
*/
#define sb_dquot_ops (NULL)
#define sb_quotactl_ops (NULL)
-#define DQUOT_INIT(inode) do { } while(0)
-#define DQUOT_DROP(inode) do { } while(0)
-#define DQUOT_ALLOC_INODE(inode) (0)
-#define DQUOT_FREE_INODE(inode) do { } while(0)
-#define DQUOT_SYNC(sb) do { } while(0)
-#define DQUOT_OFF(sb) do { } while(0)
-#define DQUOT_TRANSFER(inode, iattr) (0)
+
+static inline void DQUOT_INIT(struct inode *inode)
+{
+}
+
+static inline void DQUOT_DROP(struct inode *inode)
+{
+}
+
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void DQUOT_FREE_INODE(struct inode *inode)
+{
+}
+
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+}
+
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
+{
+ return 0;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+{
+ return 0;
+}
+
static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
inode_add_bytes(inode, nr);
@@ -235,11 +283,38 @@ static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
#endif /* CONFIG_QUOTA */
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
+static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_PREALLOC_SPACE_NODIRTY(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_PREALLOC_SPACE(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_ALLOC_SPACE_NODIRTY(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_ALLOC_SPACE(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr)
+{
+ DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits);
+}
#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 93678f57ccbe..f0827d31ae6f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -252,6 +252,8 @@ struct r6_state {
#define STRIPE_EXPANDING 9
#define STRIPE_EXPAND_SOURCE 10
#define STRIPE_EXPAND_READY 11
+#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
+#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
/*
* Operations flags (in issue order)
*/
@@ -316,12 +318,17 @@ struct raid5_private_data {
int previous_raid_disks;
struct list_head handle_list; /* stripes needing handling */
+ struct list_head hold_list; /* preread ready stripes */
struct list_head delayed_list; /* stripes that have plugged requests */
struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
struct bio *retry_read_aligned; /* currently retrying aligned bios */
struct bio *retry_read_aligned_list; /* aligned bios retry list */
atomic_t preread_active_stripes; /* stripes with scheduled io */
atomic_t active_aligned_reads;
+ atomic_t pending_full_writes; /* full write backlog */
+ int bypass_count; /* bypassed prereads */
+ int bypass_threshold; /* preread nice */
+ struct list_head *last_hold; /* detect hold_list promotions */
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
/* unfortunately we need two cache names as we temporarily have
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 8e7eff2cd0ab..4aacaeecb56f 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2176,6 +2176,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
long reiserfs_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+int reiserfs_unpack(struct inode *inode, struct file *filp);
/* ioctl's command */
#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0bd97044abd..024d72b47a0c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,6 @@ struct sched_param {
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
-#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
@@ -1133,7 +1132,7 @@ struct task_struct {
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- unsigned keep_capabilities:1;
+ unsigned securebits;
struct user_struct *user;
#ifdef CONFIG_KEYS
struct key *request_key_auth; /* assumed request_key authority */
@@ -1798,6 +1797,8 @@ extern void mmput(struct mm_struct *);
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 5b0617840fa4..c1f19dbceb05 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -3,28 +3,39 @@
#define SECUREBITS_DEFAULT 0x00000000
-extern unsigned securebits;
-
/* When set, UID 0 has no special privileges. When unset, we support
   inheritance of root-permissions and suid-root executables under
   compatibility mode. We raise the effective and inheritable bitmasks
   *of the executable file* if the effective uid of the new process is
   0. If the real uid is 0, we raise the inheritable bitmask of the
   executable file. */
-#define SECURE_NOROOT 0
+#define SECURE_NOROOT 0
+#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
/* When set, setuid to/from uid 0 does not trigger capability-"fixes"
   to be compatible with old programs relying on set*uid to lose
   privileges. When unset, setuid doesn't change privileges. */
-#define SECURE_NO_SETUID_FIXUP 2
+#define SECURE_NO_SETUID_FIXUP 2
+#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
+
+/* When set, a process can retain its capabilities even after
+   transitioning to a non-root user (the set-uid fixup suppressed by
+   bit 2). Bit-4 is cleared when a process calls exec(); setting both
+   bits 4 and 5 creates a barrier through exec() so that no exec()'d
+   child can use this feature again. */
+#define SECURE_KEEP_CAPS 4
+#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
/* Each secure setting is implemented using two bits. One bit specifies
   whether the setting is on or off. The other bit specifies whether the
   setting is fixed or not. A setting which is fixed cannot be changed
   from user-level. */
+#define issecure_mask(X) (1 << (X))
+#define issecure(X) (issecure_mask(X) & current->securebits)
-#define issecure(X) ( (1 << (X+1)) & SECUREBITS_DEFAULT ? \
- (1 << (X)) & SECUREBITS_DEFAULT : \
- (1 << (X)) & securebits )
+#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
+ issecure_mask(SECURE_NO_SETUID_FIXUP) | \
+ issecure_mask(SECURE_KEEP_CAPS))
+#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
#endif /* !_LINUX_SECUREBITS_H */
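
For illustration, a user-space sketch of driving the new per-process securebits
through prctl(). The PR_SET_SECUREBITS/PR_GET_SECUREBITS options come from the
rest of this series, the fallback numeric values and the CAP_SETPCAP requirement
are assumptions, and the locally redefined issecure_mask() mirrors the header above:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_GET_SECUREBITS		/* assumed values from this series */
	#define PR_GET_SECUREBITS 27
	#define PR_SET_SECUREBITS 28
	#endif

	#define issecure_mask(X) (1 << (X))

	int main(void)
	{
		/* keep capabilities across setuid() and lock that choice in */
		unsigned long bits = issecure_mask(4)	/* SECURE_KEEP_CAPS */
				   | issecure_mask(5);	/* SECURE_KEEP_CAPS_LOCKED */

		if (prctl(PR_SET_SECUREBITS, bits) < 0)	/* needs CAP_SETPCAP */
			perror("PR_SET_SECUREBITS");
		printf("securebits: 0x%x\n", (unsigned)prctl(PR_GET_SECUREBITS));
		return 0;
	}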
diff --git a/include/linux/security.h b/include/linux/security.h
index 53a34539382a..e6299e50e210 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,8 +34,6 @@
#include <linux/xfrm.h>
#include <net/flow.h>
-extern unsigned securebits;
-
/* Maximum number of letters for an LSM name string */
#define SECURITY_NAME_MAX 10
@@ -61,6 +59,8 @@ extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
extern void cap_task_reparent_to_init (struct task_struct *p);
+extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5, long *rc_p);
extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio (struct task_struct *p, int ioprio);
extern int cap_task_setnice (struct task_struct *p, int nice);
@@ -720,7 +720,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 * @arg3 contains an argument.
 * @arg4 contains an argument.
 * @arg5 contains an argument.
- * Return 0 if permission is granted.
+ * @rc_p contains a pointer to communicate back the forced return code
+ * Return 0 if permission is granted, and non-zero if the security module
+ * has taken responsibility (setting *rc_p) for the prctl call.
* @task_reparent_to_init:
* Set the security attributes in @p->security for a kernel thread that
* is being reparented to the init task.
@@ -1420,7 +1422,7 @@ struct security_operations {
int (*task_wait) (struct task_struct * p);
int (*task_prctl) (int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
- unsigned long arg5);
+ unsigned long arg5, long *rc_p);
void (*task_reparent_to_init) (struct task_struct * p);
void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1684,7 +1686,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
int security_task_wait(struct task_struct *p);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
+ unsigned long arg4, unsigned long arg5, long *rc_p);
void security_task_reparent_to_init(struct task_struct *p);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
@@ -2271,9 +2273,9 @@ static inline int security_task_wait (struct task_struct *p)
static inline int security_task_prctl (int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5)
+ unsigned long arg5, long *rc_p)
{
- return 0;
+	return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
}
static inline void security_task_reparent_to_init (struct task_struct *p)
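
To make the new calling convention concrete: a hook that wants to own a prctl
option returns non-zero and stores the syscall result through *rc_p. A hypothetical
LSM hook might look like the sketch below; the option handled here mirrors what
cap_task_prctl() is expected to do, and all names are illustrative:

	static int example_task_prctl(int option, unsigned long arg2,
				      unsigned long arg3, unsigned long arg4,
				      unsigned long arg5, long *rc_p)
	{
		if (option == PR_GET_KEEPCAPS) {
			*rc_p = !!issecure(SECURE_KEEP_CAPS);
			return 1;	/* handled: sys_prctl() returns *rc_p */
		}
		return 0;		/* not ours: default handling continues */
	}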
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 00b65c0a82ca..3d37c94abbc8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -46,6 +46,7 @@ enum {
PLAT8250_DEV_HUB6,
PLAT8250_DEV_MCA,
PLAT8250_DEV_AU1X00,
+ PLAT8250_DEV_SM501,
};
/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 8d5fb36ea047..f2d12d5a21b8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -34,8 +34,7 @@ struct shmem_sb_info {
uid_t uid; /* Mount uid for root directory */
gid_t gid; /* Mount gid for root directory */
mode_t mode; /* Mount mode for root directory */
- int policy; /* Default NUMA memory alloc policy */
- nodemask_t policy_nodes; /* nodemask for preferred and bind */
+ struct mempolicy *mpol; /* default memory policy for mappings */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 1d7d4c5797ee..a6977423baf7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -12,11 +12,22 @@
#include <asm/errno.h>
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+extern void pm_set_vt_switch(int);
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
-static inline int pm_prepare_console(void) { return 0; }
-static inline void pm_restore_console(void) {}
+static inline void pm_set_vt_switch(int do_switch)
+{
+}
+
+static inline int pm_prepare_console(void)
+{
+ return 0;
+}
+
+static inline void pm_restore_console(void)
+{
+}
#endif
typedef int __bitwise suspend_state_t;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 878459ae0454..0b3377650c85 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -177,11 +177,11 @@ extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
-extern int rotate_reclaimable_page(struct page *page);
+extern void rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 5562fbf72095..45f6bc82d317 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -13,10 +13,6 @@
#define _SYNCLINK_H_
#define SYNCLINK_H_VERSION 3.6
-#define BOOLEAN int
-#define TRUE 1
-#define FALSE 0
-
#define BIT0 0x0001
#define BIT1 0x0002
#define BIT2 0x0004
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 03378e3515b3..add3c5a40827 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -32,7 +32,7 @@ struct attribute {
struct attribute_group {
const char *name;
- int (*is_visible)(struct kobject *,
+ mode_t (*is_visible)(struct kobject *,
struct attribute *, int);
struct attribute **attrs;
};
@@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
int sysfs_add_file_to_group(struct kobject *kobj,
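
A sketch of how the mode_t-returning is_visible callback pairs with the new
sysfs_update_group(); example_visible, example_attrs and has_feature() are
hypothetical stand-ins:

	static mode_t example_visible(struct kobject *kobj,
				      struct attribute *attr, int index)
	{
		/* return 0 to hide the attribute, or its mode to show it */
		return has_feature(kobj) ? attr->mode : 0;
	}

	static struct attribute_group example_group = {
		.is_visible	= example_visible,
		.attrs		= example_attrs,
	};

	static void example_feature_changed(struct kobject *kobj)
	{
		/* re-evaluate attribute visibility in place */
		sysfs_update_group(kobj, &example_group);
	}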
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index ce8e7da05807..364789aae9f3 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -31,6 +31,7 @@ struct vm_struct {
struct page **pages;
unsigned int nr_pages;
unsigned long phys_addr;
+ void *caller;
};
/*
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *get_vm_area_caller(unsigned long size,
+ unsigned long flags, void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *get_vm_area_node(unsigned long size,
@@ -87,4 +90,6 @@ extern void free_vm_area(struct vm_struct *area);
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
+extern const struct seq_operations vmalloc_op;
+
#endif /* _LINUX_VMALLOC_H */
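
The new caller cookie lets wrapper functions attribute their allocations to the
real call site; a plausible fragment, using the usual __builtin_return_address(0)
idiom (the wrapper itself is hypothetical):

	static void *example_reserve(unsigned long size)
	{
		/* record our caller, not this wrapper, as the area's owner */
		struct vm_struct *area = get_vm_area_caller(size, VM_IOREMAP,
						__builtin_return_address(0));
		return area ? area->addr : NULL;
	}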
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9f1b4b46151e..e83b69346d23 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,6 +25,7 @@
#define HIGHMEM_ZONE(xx)
#endif
+
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGSCAN_DIRECT),
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+ HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
NR_VM_EVENT_ITEMS
};
@@ -174,7 +178,7 @@ static inline unsigned long node_page_state(int node,
zone_page_state(&zones[ZONE_MOVABLE], item);
}
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
#else
diff --git a/include/net/compat.h b/include/net/compat.h
index 406db242f73a..05fa5d0254ab 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,4 +40,7 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
+extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int,
+ int (*)(struct sock *, int, int, char __user *, int));
+
#endif /* NET_COMPAT_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b8b19e2f57bb..f6a9fe0ef09c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -181,7 +181,8 @@ struct scsi_device {
sdev_printk(prefix, (scmd)->device, fmt, ##a)
enum scsi_target_state {
- STARGET_RUNNING = 1,
+ STARGET_CREATED = 1,
+ STARGET_RUNNING,
STARGET_DEL,
};
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 336c20db87f8..ed64862c4e18 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -22,6 +22,15 @@
#ifndef __ATMEL_LCDC_H__
#define __ATMEL_LCDC_H__
+
+/* How the LCD wires are connected to the chip:
+ * some Atmel chips use BGR color mode (instead of standard RGB);
+ * swapped wiring on the board can bring them back to RGB mode.
+ */
+#define ATMEL_LCDC_WIRING_BGR 0
+#define ATMEL_LCDC_WIRING_RGB 1
+
+
/* LCD Controller info data structure, stored in device platform_data */
struct atmel_lcdfb_info {
spinlock_t lock;
@@ -39,8 +48,10 @@ struct atmel_lcdfb_info {
u8 bl_power;
#endif
bool lcdcon_is_backlight;
+ u8 saved_lcdcon;
u8 default_bpp;
+ u8 lcd_wiring_mode;
unsigned int default_lcdcon2;
unsigned int default_dmacon;
void (*atmel_lcdfb_power_control)(int on);
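
Board code selects the wiring through its platform data; a hedged fragment
(the bpp value and the variable name are illustrative):

	static struct atmel_lcdfb_info __initdata board_lcdc_data = {
		.default_bpp		= 16,
		.lcd_wiring_mode	= ATMEL_LCDC_WIRING_RGB, /* board swaps the wires */
	};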
diff --git a/include/video/hecubafb.h b/include/video/hecubafb.h
new file mode 100644
index 000000000000..7b9952339762
--- /dev/null
+++ b/include/video/hecubafb.h
@@ -0,0 +1,51 @@
+/*
+ * hecubafb.h - definitions for the hecuba framebuffer driver
+ *
+ * Copyright (C) 2008 by Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#ifndef _LINUX_HECUBAFB_H_
+#define _LINUX_HECUBAFB_H_
+
+/* Apollo controller specific defines */
+#define APOLLO_START_NEW_IMG 0xA0
+#define APOLLO_STOP_IMG_DATA 0xA1
+#define APOLLO_DISPLAY_IMG 0xA2
+#define APOLLO_ERASE_DISPLAY 0xA3
+#define APOLLO_INIT_DISPLAY 0xA4
+
+/* Hecuba interface specific defines */
+#define HCB_WUP_BIT 0x01
+#define HCB_DS_BIT 0x02
+#define HCB_RW_BIT 0x04
+#define HCB_CD_BIT 0x08
+#define HCB_ACK_BIT 0x80
+
+/* struct used by hecuba. board specific stuff comes from *board */
+struct hecubafb_par {
+ struct fb_info *info;
+ struct hecuba_board *board;
+ void (*send_command)(struct hecubafb_par *, unsigned char);
+ void (*send_data)(struct hecubafb_par *, unsigned char);
+};
+
+/* board specific routines
+ * Board drivers can implement wait_for_ack with interrupts if desired.
+ * If wait_for_ack is called with clear=0, sleep and return when ack
+ * goes high; if it is called with clear=1, return when ack goes low. */
+struct hecuba_board {
+ struct module *owner;
+ void (*remove)(struct hecubafb_par *);
+ void (*set_ctl)(struct hecubafb_par *, unsigned char, unsigned char);
+ void (*set_data)(struct hecubafb_par *, unsigned char);
+ void (*wait_for_ack)(struct hecubafb_par *, int);
+ int (*init)(struct hecubafb_par *);
+};
+
+
+#endif
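
A board driver would fill in hecuba_board with its own GPIO accessors; a
hypothetical skeleton (every example_* function is a stand-in):

	static struct hecuba_board example_board = {
		.owner		= THIS_MODULE,
		.remove		= example_remove,
		.set_ctl	= example_set_ctl,
		.set_data	= example_set_data,
		.wait_for_ack	= example_wait_for_ack,
		.init		= example_init,
	};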
diff --git a/include/video/metronomefb.h b/include/video/metronomefb.h
new file mode 100644
index 000000000000..dab04b4fad7f
--- /dev/null
+++ b/include/video/metronomefb.h
@@ -0,0 +1,62 @@
+/*
+ * metronomefb.h - definitions for the metronome framebuffer driver
+ *
+ * Copyright (C) 2008 by Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#ifndef _LINUX_METRONOMEFB_H_
+#define _LINUX_METRONOMEFB_H_
+
+/* address and control descriptors used by metronome controller */
+struct metromem_desc {
+ u32 mFDADR0;
+ u32 mFSADR0;
+ u32 mFIDR0;
+ u32 mLDCMD0;
+};
+
+/* command structure used by metronome controller */
+struct metromem_cmd {
+ u16 opcode;
+ u16 args[((64-2)/2)];
+ u16 csum;
+};
+
+/* struct used by metronome. board specific stuff comes from *board */
+struct metronomefb_par {
+ unsigned char *metromem;
+ struct metromem_desc *metromem_desc;
+ struct metromem_cmd *metromem_cmd;
+ unsigned char *metromem_wfm;
+ unsigned char *metromem_img;
+ u16 *metromem_img_csum;
+ u16 *csum_table;
+ int metromemsize;
+ dma_addr_t metromem_dma;
+ dma_addr_t metromem_desc_dma;
+ struct fb_info *info;
+ struct metronome_board *board;
+ wait_queue_head_t waitq;
+ u8 frame_count;
+};
+
+/* board specific routines */
+struct metronome_board {
+ struct module *owner;
+ void (*free_irq)(struct fb_info *);
+ void (*init_gpio_regs)(struct metronomefb_par *);
+ void (*init_lcdc_regs)(struct metronomefb_par *);
+ void (*post_dma_setup)(struct metronomefb_par *);
+ void (*set_rst)(struct metronomefb_par *, int);
+ void (*set_stdby)(struct metronomefb_par *, int);
+ int (*met_wait_event)(struct metronomefb_par *);
+ int (*met_wait_event_intr)(struct metronomefb_par *);
+ int (*setup_irq)(struct fb_info *);
+};
+
+#endif
diff --git a/init/Kconfig b/init/Kconfig
index f1f22db74d5a..da071c4bbfb7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -496,16 +496,12 @@ source "usr/Kconfig"
endif
config CC_OPTIMIZE_FOR_SIZE
- bool "Optimize for size (Look out for broken compilers!)"
+ bool "Optimize for size"
default y
- depends on ARM || H8300 || SUPERH || EXPERIMENTAL
help
Enabling this option will pass "-Os" instead of "-O2" to gcc
resulting in a smaller kernel.
- WARNING: some versions of gcc may generate incorrect code with this
- option. If problems are observed, a gcc upgrade may be needed.
-
If unsure, say N.
config SYSCTL
diff --git a/ipc/shm.c b/ipc/shm.c
index cc63fae02f06..e636910454a9 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -271,11 +271,9 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
if (sfd->vm_ops->get_policy)
pol = sfd->vm_ops->get_policy(vma, addr);
- else if (vma->vm_policy) {
+ else if (vma->vm_policy)
pol = vma->vm_policy;
- mpol_get(pol); /* get_vma_policy() expects this */
- } else
- pol = current->mempolicy;
+
return pol;
}
#endif
diff --git a/kernel/bounds.c b/kernel/bounds.c
new file mode 100644
index 000000000000..c3c55544db2f
--- /dev/null
+++ b/kernel/bounds.c
@@ -0,0 +1,23 @@
+/*
+ * Generate definitions needed by the preprocessor.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#define __GENERATING_BOUNDS_H
+/* Include headers that define the enum constants of interest */
+#include <linux/page-flags.h>
+#include <linux/mmzone.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+void foo(void)
+{
+ /* The enum constants to put into include/linux/bounds.h */
+ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ /* End of constants */
+}
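
The "->" markers are scraped out of the compiler's asm output by the build
scripts; the generated include/linux/bounds.h would look roughly like this
(the numeric values are illustrative and depend on the configuration):

	/* include/linux/bounds.h - generated by kernel/bounds.c, do not edit */
	#ifndef __LINUX_BOUNDS_H__
	#define __LINUX_BOUNDS_H__

	#define NR_PAGEFLAGS 20		/* example value */
	#define MAX_NR_ZONES 4		/* example value */

	#endif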
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8b35fbd8292f..024888bb9814 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -941,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- cpuset_being_rebound = cs; /* causes mpol_copy() rebind */
+ cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
fudge = 10; /* spare mmarray[] slots */
fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
* rebind the vma mempolicies of each mm in mmarray[] to their
* new cpuset, and release that mm. The mpol_rebind_mm()
* call takes mmap_sem, which we couldn't take while holding
- * tasklist_lock. Forks can happen again now - the mpol_copy()
+ * tasklist_lock. Forks can happen again now - the mpol_dup()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
* cgroup_mutex, we know that no other rebind effort will
@@ -1958,22 +1958,14 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
}
/**
- * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
- * @zl: the zonelist to be checked
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
+ * @nodemask: the nodemask to be checked
*
- * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
+ * Are any of the nodes in the nodemask allowed in current->mems_allowed?
*/
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
- int i;
-
- for (i = 0; zl->zones[i]; i++) {
- int nid = zone_to_nid(zl->zones[i]);
-
- if (node_isset(nid, current->mems_allowed))
- return 1;
- }
- return 0;
+ return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 97f609f574b1..2a9d98c641ac 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -967,7 +967,7 @@ NORET_TYPE void do_exit(long code)
proc_exit_connector(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
- mpol_free(tsk->mempolicy);
+ mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
diff --git a/kernel/fork.c b/kernel/fork.c
index cb46befdd3a0..6067e429f281 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
- pol = mpol_copy(vma_policy(mpnt));
+ pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
@@ -521,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
@@ -1116,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
- p->mempolicy = mpol_copy(p->mempolicy);
+ p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
@@ -1374,7 +1374,7 @@ bad_fork_cleanup_security:
security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
- mpol_free(p->mempolicy);
+ mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f78777abe769..e379ef0e9c20 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1080,8 +1080,19 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
* If the timer was rearmed on another CPU, reprogram
* the event device.
*/
- if (timer->base->first == &timer->node)
- hrtimer_reprogram(timer, timer->base);
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (base->first == &timer->node &&
+ hrtimer_reprogram(timer, base)) {
+ /*
+ * Timer is expired. Thus move it from tree to
+ * pending list again.
+ */
+ __remove_hrtimer(timer, base,
+ HRTIMER_STATE_PENDING, 0);
+ list_add_tail(&timer->cb_entry,
+ &base->cpu_base->cb_pending);
+ }
}
}
spin_unlock_irq(&cpu_base->lock);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 6782dce93d01..cb85c79989b4 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1405,6 +1405,9 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
+ VMCOREINFO_NUMBER(PG_lru);
+ VMCOREINFO_NUMBER(PG_private);
+ VMCOREINFO_NUMBER(PG_swapcache);
arch_crash_save_vmcoreinfo();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fcfb580c3afc..1e0250cb9486 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+/*
+ * Normally, functions that we'd want to prohibit kprobes in are marked
+ * __kprobes. But there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule).
+ *
+ * For such cases, we now have a blacklist.
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+ {"preempt_schedule",},
+ {NULL} /* Terminator */
+};
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
}
}
+static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+{
+ unsigned long flags;
+ struct kretprobe_instance *ri;
+ struct hlist_node *pos, *next;
+ /* No race here */
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
+ ri->rp = NULL;
+ hlist_del(&ri->uflist);
+ }
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ free_rp_inst(rp);
+}
+
/*
* Keep all fields in the kprobe consistent
*/
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
static int __kprobes in_kprobes_functions(unsigned long addr)
{
+ struct kprobe_blackpoint *kb;
+
if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;
+ /*
+ * If there exists a kprobe_blacklist, verify and
+ * fail any probe registration in the prohibited area
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ if (kb->start_addr) {
+ if (addr >= kb->start_addr &&
+ addr < (kb->start_addr + kb->range))
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
}
p->nmissed = 0;
+ INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
@@ -581,35 +622,28 @@ out:
return ret;
}
-int __kprobes register_kprobe(struct kprobe *p)
-{
- return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
-}
-
-void __kprobes unregister_kprobe(struct kprobe *p)
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
- struct module *mod;
struct kprobe *old_p, *list_p;
- int cleanup_p;
- mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
- if (unlikely(!old_p)) {
- mutex_unlock(&kprobe_mutex);
- return;
- }
+ if (unlikely(!old_p))
+ return -EINVAL;
+
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
goto valid_p;
- mutex_unlock(&kprobe_mutex);
- return;
+ return -EINVAL;
}
valid_p:
if (old_p == p ||
(old_p->pre_handler == aggr_pre_handler &&
- p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+ list_is_singular(&old_p->list))) {
/*
* Only probe on the hash list. Disarm only if kprobes are
* enabled - otherwise, the breakpoint would already have
@@ -618,43 +652,97 @@ valid_p:
if (kprobe_enabled)
arch_disarm_kprobe(p);
hlist_del_rcu(&old_p->hlist);
- cleanup_p = 1;
} else {
+ if (p->break_handler)
+ old_p->break_handler = NULL;
+ if (p->post_handler) {
+ list_for_each_entry_rcu(list_p, &old_p->list, list) {
+ if ((list_p != p) && (list_p->post_handler))
+ goto noclean;
+ }
+ old_p->post_handler = NULL;
+ }
+noclean:
list_del_rcu(&p->list);
- cleanup_p = 0;
}
+ return 0;
+}
- mutex_unlock(&kprobe_mutex);
+static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+{
+ struct module *mod;
+ struct kprobe *old_p;
- synchronize_sched();
if (p->mod_refcounted) {
mod = module_text_address((unsigned long)p->addr);
if (mod)
module_put(mod);
}
- if (cleanup_p) {
- if (p != old_p) {
- list_del_rcu(&p->list);
+ if (list_empty(&p->list) || list_is_singular(&p->list)) {
+ if (!list_empty(&p->list)) {
+ /* "p" is the last child of an aggr_kprobe */
+ old_p = list_entry(p->list.next, struct kprobe, list);
+ list_del(&p->list);
kfree(old_p);
}
arch_remove_kprobe(p);
- } else {
- mutex_lock(&kprobe_mutex);
- if (p->break_handler)
- old_p->break_handler = NULL;
- if (p->post_handler){
- list_for_each_entry_rcu(list_p, &old_p->list, list){
- if (list_p->post_handler){
- cleanup_p = 2;
- break;
- }
- }
- if (cleanup_p == 0)
- old_p->post_handler = NULL;
+ }
+}
+
+static int __register_kprobes(struct kprobe **kps, int num,
+ unsigned long called_from)
+{
+ int i, ret = 0;
+
+ if (num <= 0)
+ return -EINVAL;
+ for (i = 0; i < num; i++) {
+ ret = __register_kprobe(kps[i], called_from);
+ if (ret < 0 && i > 0) {
+ unregister_kprobes(kps, i);
+ break;
}
- mutex_unlock(&kprobe_mutex);
}
+ return ret;
+}
+
+/*
+ * Registration and unregistration functions for kprobe.
+ */
+int __kprobes register_kprobe(struct kprobe *p)
+{
+ return __register_kprobes(&p, 1,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+ unregister_kprobes(&p, 1);
+}
+
+int __kprobes register_kprobes(struct kprobe **kps, int num)
+{
+ return __register_kprobes(kps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(kps[i]) < 0)
+ kps[i]->addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++)
+ if (kps[i]->addr)
+ __unregister_kprobe_bottom(kps[i]);
}
static struct notifier_block kprobe_exceptions_nb = {
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
-int __kprobes register_jprobe(struct jprobe *jp)
+static int __register_jprobes(struct jprobe **jps, int num,
+ unsigned long called_from)
{
- unsigned long addr = arch_deref_entry_point(jp->entry);
+ struct jprobe *jp;
+ int ret = 0, i;
- if (!kernel_text_address(addr))
+ if (num <= 0)
return -EINVAL;
+ for (i = 0; i < num; i++) {
+ unsigned long addr;
+ jp = jps[i];
+ addr = arch_deref_entry_point(jp->entry);
+
+ if (!kernel_text_address(addr))
+ ret = -EINVAL;
+ else {
+ /* Todo: Verify probepoint is a function entry point */
+ jp->kp.pre_handler = setjmp_pre_handler;
+ jp->kp.break_handler = longjmp_break_handler;
+ ret = __register_kprobe(&jp->kp, called_from);
+ }
+ if (ret < 0 && i > 0) {
+ unregister_jprobes(jps, i);
+ break;
+ }
+ }
+ return ret;
+}
- /* Todo: Verify probepoint is a function entry point */
- jp->kp.pre_handler = setjmp_pre_handler;
- jp->kp.break_handler = longjmp_break_handler;
-
- return __register_kprobe(&jp->kp,
+int __kprobes register_jprobe(struct jprobe *jp)
+{
+ return __register_jprobes(&jp, 1,
(unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_jprobe(struct jprobe *jp)
{
- unregister_kprobe(&jp->kp);
+ unregister_jprobes(&jp, 1);
+}
+
+int __kprobes register_jprobes(struct jprobe **jps, int num)
+{
+ return __register_jprobes(jps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(&jps[i]->kp) < 0)
+ jps[i]->kp.addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++) {
+ if (jps[i]->kp.addr)
+ __unregister_kprobe_bottom(&jps[i]->kp);
+ }
}
#ifdef CONFIG_KRETPROBES
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
return 0;
}
-int __kprobes register_kretprobe(struct kretprobe *rp)
+static int __kprobes __register_kretprobe(struct kretprobe *rp,
+ unsigned long called_from)
{
int ret = 0;
struct kretprobe_instance *inst;
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->nmissed = 0;
/* Establish function entry probe point */
- if ((ret = __register_kprobe(&rp->kp,
- (unsigned long)__builtin_return_address(0))) != 0)
+ ret = __register_kprobe(&rp->kp, called_from);
+ if (ret != 0)
free_rp_inst(rp);
return ret;
}
+static int __register_kretprobes(struct kretprobe **rps, int num,
+ unsigned long called_from)
+{
+ int ret = 0, i;
+
+ if (num <= 0)
+ return -EINVAL;
+ for (i = 0; i < num; i++) {
+ ret = __register_kretprobe(rps[i], called_from);
+ if (ret < 0 && i > 0) {
+ unregister_kretprobes(rps, i);
+ break;
+ }
+ }
+ return ret;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
+{
+ return __register_kretprobes(&rp, 1,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
+{
+ unregister_kretprobes(&rp, 1);
+}
+
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+{
+ return __register_kretprobes(rps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(&rps[i]->kp) < 0)
+ rps[i]->kp.addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++) {
+ if (rps[i]->kp.addr) {
+ __unregister_kprobe_bottom(&rps[i]->kp);
+ cleanup_rp_inst(rps[i]);
+ }
+ }
+}
+
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
- struct pt_regs *regs)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
- return 0;
+ return -ENOSYS;
}
-#endif /* CONFIG_KRETPROBES */
-
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
- unsigned long flags;
- struct kretprobe_instance *ri;
- struct hlist_node *pos, *next;
+}
- unregister_kprobe(&rp->kp);
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
- /* No race here */
- spin_lock_irqsave(&kretprobe_lock, flags);
- hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
- ri->rp = NULL;
- hlist_del(&ri->uflist);
- }
- spin_unlock_irqrestore(&kretprobe_lock, flags);
- free_rp_inst(rp);
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ return 0;
}
+#endif /* CONFIG_KRETPROBES */
+
static int __init init_kprobes(void)
{
int i, err = 0;
+ unsigned long offset = 0, size = 0;
+ char *modname, namebuf[128];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
+ /*
+	 * Look up and populate the kprobe_blacklist.
+	 *
+	 * Unlike the kretprobe blacklist, we'll need to determine
+	 * the range of addresses that belong to those functions,
+ * since a kprobe need not necessarily be at the beginning
+ * of a function.
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ continue;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+ }
+
if (kretprobe_blacklist_size) {
/* lookup the function address from its name */
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
+EXPORT_SYMBOL_GPL(register_kprobes);
+EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
+EXPORT_SYMBOL_GPL(register_jprobes);
+EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
+EXPORT_SYMBOL_GPL(register_kretprobes);
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
#endif
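
Usage of the new batch interface, sketched for two probes; the probed symbols
and the handler body are placeholders:

	static int example_pre(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* placeholder pre-handler */
	}

	static struct kprobe kp1 = { .symbol_name = "do_fork",
				     .pre_handler = example_pre };
	static struct kprobe kp2 = { .symbol_name = "do_exit",
				     .pre_handler = example_pre };
	static struct kprobe *kps[] = { &kp1, &kp2 };

	static int __init example_init(void)
	{
		/* all-or-nothing: a mid-array failure rolls earlier probes back */
		return register_kprobes(kps, ARRAY_SIZE(kps));
	}

	static void __exit example_exit(void)
	{
		unregister_kprobes(kps, ARRAY_SIZE(kps));
	}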
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 89bcf4973ee5..b8628be2a465 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -7,17 +7,39 @@
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
#include <linux/console.h>
+#include <linux/module.h>
#include "power.h"
#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
static int orig_fgconsole, orig_kmsg;
+static int disable_vt_switch;
+
+/*
+ * Normally during a suspend, we allocate a new console and switch to it.
+ * When we resume, we switch back to the original console. This switch
+ * can be slow, so on systems where the framebuffer can handle restoration
+ * of video registers anyway, there's little point in doing the console
+ * switch. This function allows you to disable it by passing it '0'.
+ */
+void pm_set_vt_switch(int do_switch)
+{
+ acquire_console_sem();
+ disable_vt_switch = !do_switch;
+ release_console_sem();
+}
+EXPORT_SYMBOL(pm_set_vt_switch);
int pm_prepare_console(void)
{
acquire_console_sem();
+ if (disable_vt_switch) {
+ release_console_sem();
+ return 0;
+ }
+
orig_fgconsole = fg_console;
if (vc_allocate(SUSPEND_CONSOLE)) {
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
void pm_restore_console(void)
{
acquire_console_sem();
+ if (disable_vt_switch) {
+ release_console_sem();
+ return;
+ }
set_console(orig_fgconsole);
release_console_sem();
kmsg_redirect = orig_kmsg;
- return;
}
#endif
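
A driver whose hardware repaints itself on resume can now opt out of the VT
switch; a minimal sketch (the probe function and device are hypothetical):

	static int example_fb_probe(struct platform_device *pdev)
	{
		/* this hardware restores its own video state on resume */
		pm_set_vt_switch(0);
		return 0;
	}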
diff --git a/kernel/sys.c b/kernel/sys.c
index 6a0cc71ee88d..f2a451366953 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,10 +1632,9 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- long error;
+ long uninitialized_var(error);
- error = security_task_prctl(option, arg2, arg3, arg4, arg5);
- if (error)
+ if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
return error;
switch (option) {
@@ -1688,17 +1687,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = -EINVAL;
break;
- case PR_GET_KEEPCAPS:
- if (current->keep_capabilities)
- error = 1;
- break;
- case PR_SET_KEEPCAPS:
- if (arg2 != 0 && arg2 != 1) {
- error = -EINVAL;
- break;
- }
- current->keep_capabilities = arg2;
- break;
case PR_SET_NAME: {
struct task_struct *me = current;
unsigned char ncomm[sizeof(me->comm)];
@@ -1732,17 +1720,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
case PR_SET_SECCOMP:
error = prctl_set_seccomp(arg2);
break;
-
- case PR_CAPBSET_READ:
- if (!cap_valid(arg2))
- return -EINVAL;
- return !!cap_raised(current->cap_bset, arg2);
- case PR_CAPBSET_DROP:
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
- return cap_prctl_drop(arg2);
-#else
- return -EINVAL;
-#endif
case PR_GET_TSC:
error = GET_TSC_CTL(arg2);
break;
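
Note that the options removed here do not leave the ABI: they are now answered
by cap_task_prctl() through the security hook, so the user-space calls are
unchanged. A fragment:

	int keep;

	prctl(PR_SET_KEEPCAPS, 1);			/* behaves as before */
	keep = prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0);	/* still returns 0 or 1 */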
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a6939e18d7bb..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -714,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
}
EXPORT_SYMBOL(bitmap_bitremap);
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ * @dst: resulting translated bitmap
+ * @orig: original untranslated bitmap
+ * @relmap: bitmap relative to which translated
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap, are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty. In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W. The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ * Let's say @relmap has bits 30-39 set, and @orig has bits
+ * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
+ * @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ * When bit 0 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the first bit (if any)
+ * that is turned on in @relmap. Since bit 0 was off in the
+ * above example, we leave off that bit (bit 30) in @dst.
+ *
+ * When bit 1 is set in @orig (as in the above example), it
+ * means turn on the bit in @dst corresponding to whatever
+ * is the second bit that is turned on in @relmap. The second
+ * bit in @relmap that was turned on in the above example was
+ * bit 31, so we turned on bit 31 in @dst.
+ *
+ * Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ * because they were the 4th, 6th, 8th and 10th set bits
+ * set in @relmap, and the 4th, 6th, 8th and 10th bits of
+ * @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ * When bit 11 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the twelfth bit that is
+ * turned on in @relmap. In the above example, there were
+ * only ten bits turned on in @relmap (30..39), so the fact that
+ * bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ * Let's say @relmap has these ten bits set:
+ * 40 41 42 43 45 48 53 61 74 95
+ * (for the curious, that's 40 plus the first ten terms of the
+ * Fibonacci sequence.)
+ *
+ * Further, let's say we use the following code, invoking
+ * bitmap_fold() then bitmap_onto(), as suggested above to
+ * avoid the possibility of an empty @dst result:
+ *
+ * unsigned long *tmp; // a temporary bitmap's bits
+ *
+ * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ * bitmap_onto(dst, tmp, relmap, bits);
+ *
+ * Then this table shows what various values of @dst would be, for
+ * various @orig's. I list the zero-based positions of each set bit.
+ * The tmp column shows the intermediate result, as computed by
+ * using bitmap_fold() to fold the @orig bitmap modulo ten
+ * (the weight of @relmap).
+ *
+ * @orig tmp @dst
+ * 0 0 40
+ * 1 1 41
+ * 9 9 95
+ * 10 0 40 (*)
+ * 1 3 5 7 1 3 5 7 41 43 48 61
+ * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
+ * 0 9 18 27 0 9 8 7 40 61 74 95
+ * 0 10 20 30 0 40
+ * 0 11 22 33 0 1 2 3 40 41 42 43
+ * 0 12 24 36 0 2 4 6 40 42 45 53
+ * 78 102 211 1 2 8 41 42 74 (*)
+ *
+ * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ * into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W, (where W is the weight of @relmap) then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits)
+{
+ int n, m; /* same meaning as in above comment */
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ /*
+ * The following code is a more efficient, but less
+ * obvious, equivalent to the loop:
+ * for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+ * n = bitmap_ord_to_pos(orig, m, bits);
+ * if (test_bit(m, orig))
+ * set_bit(n, dst);
+ * }
+ */
+
+ m = 0;
+ for (n = find_first_bit(relmap, bits);
+ n < bits;
+ n = find_next_bit(relmap, bits, n + 1)) {
+ /* m == bitmap_pos_to_ord(relmap, n, bits) */
+ if (test_bit(m, orig))
+ set_bit(n, dst);
+ m++;
+ }
+}
+EXPORT_SYMBOL(bitmap_onto);
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ * @dst: resulting smaller bitmap
+ * @orig: original larger bitmap
+ * @sz: specified size
+ * @bits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst. See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits)
+{
+ int oldbit;
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ for (oldbit = find_first_bit(orig, bits);
+ oldbit < bits;
+ oldbit = find_next_bit(orig, bits, oldbit + 1))
+ set_bit(oldbit % sz, dst);
+}
+EXPORT_SYMBOL(bitmap_fold);
+
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
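
The fold-then-onto idiom from Example [2] above, written out with the usual
bitmap helpers; BITS stands in for the common bitmap length and the wrapper
function is illustrative:

	static void example_remap(unsigned long *dst, const unsigned long *orig,
				  const unsigned long *relmap)
	{
		DECLARE_BITMAP(tmp, BITS);
		int w = bitmap_weight(relmap, BITS);

		/* squeeze orig into 0..w-1, then scatter over relmap's set bits */
		bitmap_fold(tmp, orig, w, BITS);
		bitmap_onto(dst, tmp, relmap, BITS);
	}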
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
}
}
if (ret == NULL)
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
rtp = &__get_cpu_var(radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
- node = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor);
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
diff --git a/mm/Kconfig b/mm/Kconfig
index 0016ebd4dcba..3aa819d628c1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -143,6 +143,18 @@ config MEMORY_HOTREMOVE
depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
depends on MIGRATION
+#
+# If we have space for more page flags then we can enable additional
+# optimizations and functionality.
+#
+# Regular Sparsemem takes page flag bits for the sectionid if it does not
+# use a virtual memmap. Disable extended page flags for 32 bit platforms
+# that require the use of a sectionid in the page flags.
+#
+config PAGEFLAGS_EXTENDED
+ def_bool y
+ depends on 64BIT || SPARSEMEM_VMEMMAP || !NUMA || !SPARSEMEM
+
# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
diff --git a/mm/bootmem.c b/mm/bootmem.c
index b6791646143e..e8fb927392b9 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -461,6 +461,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
+ register_page_bootmem_info_node(pgdat);
return free_all_bootmem_core(pgdat);
}
@@ -544,6 +545,37 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
return __alloc_bootmem(size, align, goal);
}
+#ifdef CONFIG_SPARSEMEM
+void * __init alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr)
+{
+ void *ptr;
+ unsigned long limit, goal, start_nr, end_nr, pfn;
+ struct pglist_data *pgdat;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = PFN_PHYS(pfn);
+ limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
+ pgdat = NODE_DATA(early_pfn_to_nid(pfn));
+ ptr = __alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
+ limit);
+
+ if (!ptr)
+ return NULL;
+
+ start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
+ end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
+ if (start_nr != section_nr || end_nr != section_nr) {
+ printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
+ section_nr);
+ free_bootmem_core(pgdat->bdata, __pa(ptr), size);
+ ptr = NULL;
+ }
+
+ return ptr;
+}
+#endif
+
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 34aaac451a96..b1f0885dda22 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -37,6 +37,10 @@
#include <linux/types.h>
#include <linux/wait.h>
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#define DMAPOOL_DEBUG 1
+#endif
+
struct dma_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
@@ -216,7 +220,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
&page->dma, mem_flags);
if (page->vaddr) {
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
pool_initialise_page(pool, page);
@@ -239,7 +243,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
dma_addr_t dma = page->dma;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
@@ -336,7 +340,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
page->offset = *(int *)(page->vaddr + offset);
retval = offset + page->vaddr;
*handle = offset + page->dma;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
@@ -391,7 +395,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
}
offset = vaddr - page->vaddr;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
if ((dma - page->dma) != offset) {
if (pool->dev)
dev_err(pool->dev,
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3c0f1e99f5e4..343cfdfebd9e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -49,7 +49,7 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
goto out;
}
- if (mapping->a_ops->get_xip_page) {
+ if (mapping->a_ops->get_xip_mem) {
switch (advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
diff --git a/mm/filemap.c b/mm/filemap.c
index 07e9d9258b48..239d36163bbe 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -576,10 +576,12 @@ EXPORT_SYMBOL(unlock_page);
*/
void end_page_writeback(struct page *page)
{
- if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
- if (!test_clear_page_writeback(page))
- BUG();
- }
+ if (TestClearPageReclaim(page))
+ rotate_reclaimable_page(page);
+
+ if (!test_clear_page_writeback(page))
+ BUG();
+
smp_mb__after_clear_bit();
wake_up_page(page, PG_writeback);
}
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 5e598c42afd7..3e744abcce9d 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,6 +15,7 @@
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
+#include <asm/io.h>
/*
* We do use our own empty page to avoid interference with other users
@@ -42,37 +43,41 @@ static struct page *xip_sparse_page(void)
/*
* This is a file read routine for execute in place files, and uses
- * the mapping->a_ops->get_xip_page() function for the actual low-level
+ * the mapping->a_ops->get_xip_mem() function for the actual low-level
* stuff.
*
* Note the struct file* is not used at all. It may be NULL.
*/
-static void
+static ssize_t
do_xip_mapping_read(struct address_space *mapping,
struct file_ra_state *_ra,
struct file *filp,
- loff_t *ppos,
- read_descriptor_t *desc,
- read_actor_t actor)
+ char __user *buf,
+ size_t len,
+ loff_t *ppos)
{
struct inode *inode = mapping->host;
pgoff_t index, end_index;
unsigned long offset;
- loff_t isize;
+ loff_t isize, pos;
+ size_t copied = 0, error = 0;
- BUG_ON(!mapping->a_ops->get_xip_page);
+ BUG_ON(!mapping->a_ops->get_xip_mem);
- index = *ppos >> PAGE_CACHE_SHIFT;
- offset = *ppos & ~PAGE_CACHE_MASK;
+ pos = *ppos;
+ index = pos >> PAGE_CACHE_SHIFT;
+ offset = pos & ~PAGE_CACHE_MASK;
isize = i_size_read(inode);
if (!isize)
goto out;
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
- for (;;) {
- struct page *page;
- unsigned long nr, ret;
+ do {
+ unsigned long nr, left;
+ void *xip_mem;
+ unsigned long xip_pfn;
+ int zero = 0;
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
@@ -85,19 +90,17 @@ do_xip_mapping_read(struct address_space *mapping,
}
}
nr = nr - offset;
+ if (nr > len)
+ nr = len;
- page = mapping->a_ops->get_xip_page(mapping,
- index*(PAGE_SIZE/512), 0);
- if (!page)
- goto no_xip_page;
- if (unlikely(IS_ERR(page))) {
- if (PTR_ERR(page) == -ENODATA) {
+ error = mapping->a_ops->get_xip_mem(mapping, index, 0,
+ &xip_mem, &xip_pfn);
+ if (unlikely(error)) {
+ if (error == -ENODATA) {
/* sparse */
- page = ZERO_PAGE(0);
- } else {
- desc->error = PTR_ERR(page);
+ zero = 1;
+ } else
goto out;
- }
}
/* If users can be writing to this page using arbitrary
@@ -105,10 +108,10 @@ do_xip_mapping_read(struct address_space *mapping,
* before reading the page on the kernel side.
*/
if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
+ /* address based flush */ ;
/*
- * Ok, we have the page, so now we can copy it to user space...
+ * Ok, we have the mem, so now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
@@ -116,47 +119,38 @@ do_xip_mapping_read(struct address_space *mapping,
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
- ret = actor(desc, page, offset, nr);
- offset += ret;
- index += offset >> PAGE_CACHE_SHIFT;
- offset &= ~PAGE_CACHE_MASK;
+ if (!zero)
+ left = __copy_to_user(buf+copied, xip_mem+offset, nr);
+ else
+ left = __clear_user(buf + copied, nr);
- if (ret == nr && desc->count)
- continue;
- goto out;
+ if (left) {
+ error = -EFAULT;
+ goto out;
+ }
-no_xip_page:
- /* Did not get the page. Report it */
- desc->error = -EIO;
- goto out;
- }
+ copied += (nr - left);
+ offset += (nr - left);
+ index += offset >> PAGE_CACHE_SHIFT;
+ offset &= ~PAGE_CACHE_MASK;
+ } while (copied < len);
out:
- *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+ *ppos = pos + copied;
if (filp)
file_accessed(filp);
+
+ return (copied ? copied : error);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
- read_descriptor_t desc;
-
if (!access_ok(VERIFY_WRITE, buf, len))
return -EFAULT;
- desc.written = 0;
- desc.arg.buf = buf;
- desc.count = len;
- desc.error = 0;
-
- do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
- ppos, &desc, file_read_actor);
-
- if (desc.written)
- return desc.written;
- else
- return desc.error;
+ return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+ buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
@@ -211,13 +205,16 @@ __xip_unmap (struct address_space * mapping,
*
* This function is derived from filemap_fault, but used for execute in place
*/
-static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct file *file = area->vm_file;
+ struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- struct page *page;
pgoff_t size;
+ void *xip_mem;
+ unsigned long xip_pfn;
+ struct page *page;
+ int error;
/* XXX: are VM_FAULT_ codes OK? */
@@ -225,35 +222,44 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
if (vmf->pgoff >= size)
return VM_FAULT_SIGBUS;
- page = mapping->a_ops->get_xip_page(mapping,
- vmf->pgoff*(PAGE_SIZE/512), 0);
- if (!IS_ERR(page))
- goto out;
- if (PTR_ERR(page) != -ENODATA)
+ error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
+ &xip_mem, &xip_pfn);
+ if (likely(!error))
+ goto found;
+ if (error != -ENODATA)
return VM_FAULT_OOM;
/* sparse block */
- if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
- (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
+ if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
+ (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
(!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
+ int err;
+
/* maybe shared writable, allocate new block */
- page = mapping->a_ops->get_xip_page(mapping,
- vmf->pgoff*(PAGE_SIZE/512), 1);
- if (IS_ERR(page))
+ error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
+ &xip_mem, &xip_pfn);
+ if (error)
return VM_FAULT_SIGBUS;
- /* unmap page at pgoff from all other vmas */
+ /* unmap sparse mappings at pgoff from all other vmas */
__xip_unmap(mapping, vmf->pgoff);
+
+found:
+ err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+ xip_pfn);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ BUG_ON(err);
+ return VM_FAULT_NOPAGE;
} else {
/* not shared and writable, use xip_sparse_page() */
page = xip_sparse_page();
if (!page)
return VM_FAULT_OOM;
- }
-out:
- page_cache_get(page);
- vmf->page = page;
- return 0;
+ page_cache_get(page);
+ vmf->page = page;
+ return 0;
+ }
}
static struct vm_operations_struct xip_file_vm_ops = {
@@ -262,11 +268,11 @@ static struct vm_operations_struct xip_file_vm_ops = {
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
- BUG_ON(!file->f_mapping->a_ops->get_xip_page);
+ BUG_ON(!file->f_mapping->a_ops->get_xip_mem);
file_accessed(file);
vma->vm_ops = &xip_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
+ vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
@@ -279,17 +285,17 @@ __xip_file_write(struct file *filp, const char __user *buf,
const struct address_space_operations *a_ops = mapping->a_ops;
struct inode *inode = mapping->host;
long status = 0;
- struct page *page;
size_t bytes;
ssize_t written = 0;
- BUG_ON(!mapping->a_ops->get_xip_page);
+ BUG_ON(!mapping->a_ops->get_xip_mem);
do {
unsigned long index;
unsigned long offset;
size_t copied;
- char *kaddr;
+ void *xip_mem;
+ unsigned long xip_pfn;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
index = pos >> PAGE_CACHE_SHIFT;
@@ -297,28 +303,22 @@ __xip_file_write(struct file *filp, const char __user *buf,
if (bytes > count)
bytes = count;
- page = a_ops->get_xip_page(mapping,
- index*(PAGE_SIZE/512), 0);
- if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
+ status = a_ops->get_xip_mem(mapping, index, 0,
+ &xip_mem, &xip_pfn);
+ if (status == -ENODATA) {
			/* we allocate a new page and unmap it */
- page = a_ops->get_xip_page(mapping,
- index*(PAGE_SIZE/512), 1);
- if (!IS_ERR(page))
+ status = a_ops->get_xip_mem(mapping, index, 1,
+ &xip_mem, &xip_pfn);
+ if (!status)
/* unmap page at pgoff from all other vmas */
__xip_unmap(mapping, index);
}
- if (IS_ERR(page)) {
- status = PTR_ERR(page);
+ if (status)
break;
- }
- fault_in_pages_readable(buf, bytes);
- kaddr = kmap_atomic(page, KM_USER0);
copied = bytes -
- __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ __copy_from_user_nocache(xip_mem + offset, buf, bytes);
if (likely(copied > 0)) {
status = copied;
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(xip_file_write);
/*
* truncate a page used for execute in place
- * functionality is analog to block_truncate_page but does use get_xip_page
+ * functionality is analogous to block_truncate_page, but uses get_xip_mem
* to get the page instead of page cache
*/
int
@@ -408,9 +408,11 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize;
unsigned length;
- struct page *page;
+ void *xip_mem;
+ unsigned long xip_pfn;
+ int err;
- BUG_ON(!mapping->a_ops->get_xip_page);
+ BUG_ON(!mapping->a_ops->get_xip_mem);
blocksize = 1 << mapping->host->i_blkbits;
length = offset & (blocksize - 1);
@@ -421,18 +423,16 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
length = blocksize - length;
- page = mapping->a_ops->get_xip_page(mapping,
- index*(PAGE_SIZE/512), 0);
- if (!page)
- return -ENOMEM;
- if (unlikely(IS_ERR(page))) {
- if (PTR_ERR(page) == -ENODATA)
+ err = mapping->a_ops->get_xip_mem(mapping, index, 0,
+ &xip_mem, &xip_pfn);
+ if (unlikely(err)) {
+ if (err == -ENODATA)
/* Hole? No need to truncate */
return 0;
else
- return PTR_ERR(page);
+ return err;
}
- zero_user(page, offset, length);
+ memset(xip_mem + offset, 0, length);
return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
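
The filemap_xip.c conversion above assumes that every XIP-capable filesystem now provides the get_xip_mem address_space operation, which reports both a kernel address and a pfn for a page-sized block. A minimal sketch of such an implementation follows, assuming a hypothetical linearly mapped backing device; example_dev, base_kaddr, base_pfn and nr_pages are illustrative names, not part of this patch:

static int example_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
			       int create, void **kmem, unsigned long *pfn)
{
	struct example_dev *dev = mapping->host->i_sb->s_fs_info;

	/* a real implementation would allocate the block when create != 0 */
	if (pgoff >= dev->nr_pages)
		return -ENODATA;	/* treated as a hole by the callers */

	/* one kernel address and one pfn per page-sized block */
	*kmem = dev->base_kaddr + ((unsigned long)pgoff << PAGE_SHIFT);
	*pfn = dev->base_pfn + pgoff;
	return 0;
}
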
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51c9e2c01640..df28c1773fb2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,13 +95,16 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
int nid;
struct page *page = NULL;
struct mempolicy *mpol;
+ nodemask_t *nodemask;
struct zonelist *zonelist = huge_zonelist(vma, address,
- htlb_alloc_mask, &mpol);
- struct zone **z;
-
- for (z = zonelist->zones; *z; z++) {
- nid = zone_to_nid(*z);
- if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+ htlb_alloc_mask, &mpol, &nodemask);
+ struct zone *zone;
+ struct zoneref *z;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ MAX_NR_ZONES - 1, nodemask) {
+ nid = zone_to_nid(zone);
+ if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
@@ -113,7 +116,7 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
break;
}
}
- mpol_free(mpol); /* unref if mpol !NULL */
+ mpol_cond_put(mpol);
return page;
}
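
The loop above relies on the two-zonelist infrastructure from earlier in this series: each node keeps one full zonelist, and MPOL_BIND is applied as a nodemask filter at iteration time instead of via a per-policy custom zonelist. The pattern in isolation, as a minimal sketch (GFP_KERNEL chosen only for illustration):

static int example_count_zones(nodemask_t *nodemask)
{
	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
	struct zoneref *z;
	struct zone *zone;
	int n = 0;

	/*
	 * Visits zones at or below gfp_zone(GFP_KERNEL), highest first,
	 * skipping nodes not set in *nodemask (NULL means no filtering).
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(GFP_KERNEL), nodemask)
		n++;
	return n;
}
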
@@ -129,6 +132,7 @@ static void update_and_free_page(struct page *page)
}
set_compound_page_dtor(page, NULL);
set_page_refcounted(page);
+ arch_release_hugepage(page);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
@@ -198,6 +202,10 @@ static struct page *alloc_fresh_huge_page_node(int nid)
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
HUGETLB_PAGE_ORDER);
if (page) {
+ if (arch_prepare_hugepage(page)) {
+ __free_pages(page, HUGETLB_PAGE_ORDER);
+ return 0;
+ }
set_compound_page_dtor(page, free_huge_page);
spin_lock(&hugetlb_lock);
nr_huge_pages++;
@@ -239,6 +247,11 @@ static int alloc_fresh_huge_page(void)
hugetlb_next_nid = next_nid;
} while (!page && hugetlb_next_nid != start_nid);
+ if (ret)
+ count_vm_event(HTLB_BUDDY_PGALLOC);
+ else
+ count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
return ret;
}
@@ -299,9 +312,11 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
*/
nr_huge_pages_node[nid]++;
surplus_huge_pages_node[nid]++;
+ __count_vm_event(HTLB_BUDDY_PGALLOC);
} else {
nr_huge_pages--;
surplus_huge_pages--;
+ __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
}
spin_unlock(&hugetlb_lock);
@@ -369,11 +384,19 @@ retry:
resv_huge_pages += delta;
ret = 0;
free:
+ /* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ if ((--needed) < 0)
+ break;
list_del(&page->lru);
- if ((--needed) >= 0)
- enqueue_huge_page(page);
- else {
+ enqueue_huge_page(page);
+ }
+
+ /* Free unnecessary surplus pages to the buddy allocator */
+ if (!list_empty(&surplus_list)) {
+ spin_unlock(&hugetlb_lock);
+ list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ list_del(&page->lru);
/*
* The page has a reference count of zero already, so
* call free_huge_page directly instead of using
@@ -381,10 +404,9 @@ free:
* unlocked which is safe because free_huge_page takes
* hugetlb_lock before deciding how to free the page.
*/
- spin_unlock(&hugetlb_lock);
free_huge_page(page);
- spin_lock(&hugetlb_lock);
}
+ spin_lock(&hugetlb_lock);
}
return ret;
@@ -718,7 +740,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
} else {
- entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+ entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
@@ -731,8 +753,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
{
pte_t entry;
- entry = pte_mkwrite(pte_mkdirty(*ptep));
- if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+ entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+ if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
update_mmu_cache(vma, address, entry);
}
}
@@ -762,10 +784,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
spin_lock(&dst->page_table_lock);
spin_lock(&src->page_table_lock);
- if (!pte_none(*src_pte)) {
+ if (!huge_pte_none(huge_ptep_get(src_pte))) {
if (cow)
- ptep_set_wrprotect(src, addr, src_pte);
- entry = *src_pte;
+ huge_ptep_set_wrprotect(src, addr, src_pte);
+ entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -809,7 +831,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
continue;
pte = huge_ptep_get_and_clear(mm, address, ptep);
- if (pte_none(pte))
+ if (huge_pte_none(pte))
continue;
page = pte_page(pte);
@@ -873,8 +895,9 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & HPAGE_MASK);
- if (likely(pte_same(*ptep, pte))) {
+ if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
+ huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
/* Make the old page be freed below */
@@ -942,7 +965,7 @@ retry:
goto backout;
ret = 0;
- if (!pte_none(*ptep))
+ if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -984,8 +1007,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* the same page in the page cache.
*/
mutex_lock(&hugetlb_instantiation_mutex);
- entry = *ptep;
- if (pte_none(entry)) {
+ entry = huge_ptep_get(ptep);
+ if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
mutex_unlock(&hugetlb_instantiation_mutex);
return ret;
@@ -995,7 +1018,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
- if (likely(pte_same(entry, *ptep)))
+ if (likely(pte_same(entry, huge_ptep_get(ptep))))
if (write_access && !pte_write(entry))
ret = hugetlb_cow(mm, vma, address, ptep, entry);
spin_unlock(&mm->page_table_lock);
@@ -1025,7 +1048,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
- if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+ if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+ (write && !pte_write(huge_ptep_get(pte)))) {
int ret;
spin_unlock(&mm->page_table_lock);
@@ -1041,7 +1065,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
- page = pte_page(*pte);
+ page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
get_page(page);
@@ -1090,7 +1114,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
continue;
if (huge_pmd_unshare(mm, &address, ptep))
continue;
- if (!pte_none(*ptep)) {
+ if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
set_huge_pte_at(mm, address, ptep, pte);
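
The huge_ptep_get()/huge_pte_none() accessors and the arch_{prepare,release}_hugepage() hooks introduced above let architectures whose huge PTEs are not ordinary PTEs (s390, in this series) intercept every access. On architectures with no such requirement the hooks collapse to trivial wrappers, roughly as below (a sketch of the per-arch fallbacks, cf. the asm headers; not part of this file):

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;		/* nothing to do on most architectures */
}

static inline void arch_release_hugepage(struct page *page)
{
}
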
diff --git a/mm/internal.h b/mm/internal.h
index 789727309f4d..0034e947e4bc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -34,8 +34,7 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
-extern void __init __free_pages_bootmem(struct page *page,
- unsigned int order);
+extern void __free_pages_bootmem(struct page *page, unsigned int order);
/*
* function for dealing with page's order in buddy system.
diff --git a/mm/madvise.c b/mm/madvise.c
index 93ee375b38e7..23a0ec3e0ea0 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,7 +112,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
if (!file)
return -EBADF;
- if (file->f_mapping->a_ops->get_xip_page) {
+ if (file->f_mapping->a_ops->get_xip_mem) {
/* no bad return value, but ignore advice */
return 0;
}
diff --git a/mm/memory.c b/mm/memory.c
index 0d14d1e58a5f..bbab1e37055e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -371,57 +371,93 @@ static inline int is_cow_mapping(unsigned int flags)
}
/*
- * This function gets the "struct page" associated with a pte.
+ * vm_normal_page -- This function gets the "struct page" associated with a pte.
*
- * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
- * will have each page table entry just pointing to a raw page frame
- * number, and as far as the VM layer is concerned, those do not have
- * pages associated with them - even if the PFN might point to memory
- * that otherwise is perfectly fine and has a "struct page".
+ * "Special" mappings do not wish to be associated with a "struct page" (either
+ * it doesn't exist, or it exists but they don't want to touch it). In this
+ * case, NULL is returned here. "Normal" mappings do have a struct page.
*
- * The way we recognize those mappings is through the rules set up
- * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
- * and the vm_pgoff will point to the first PFN mapped: thus every
- * page that is a raw mapping will always honor the rule
+ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
+ * pte bit, in which case this function is trivial. Secondly, an architecture
+ * may not have a spare pte bit, which requires a more complicated scheme,
+ * described below.
+ *
+ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
+ * special mapping (even if there are underlying and valid "struct pages").
+ * COWed pages of a VM_PFNMAP are always normal.
+ *
+ * The way we recognize COWed pages within VM_PFNMAP mappings is through the
+ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
+ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
+ * mapping will always honor the rule
*
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
*
- * and if that isn't true, the page has been COW'ed (in which case it
- * _does_ have a "struct page" associated with it even if it is in a
- * VM_PFNMAP range).
+ * And for normal mappings this is false.
+ *
+ * This restricts such mappings to be a linear translation from virtual address
+ * to pfn. To get around this restriction, we allow arbitrary mappings so long
+ * as the vma is not a COW mapping; in that case, we know that all ptes are
+ * special (because none can have been COWed).
+ *
+ *
+ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
+ *
+ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
+ * page" backing, however the difference is that _all_ pages with a struct
+ * page (that is, those where pfn_valid is true) are refcounted and considered
+ * normal pages by the VM. The disadvantage is that pages are refcounted
+ * (which can be slower and simply not an option for some PFNMAP users). The
+ * advantage is that we don't have to follow the strict linearity rule of
+ * PFNMAP mappings in order to support COWable mappings.
+ *
*/
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+# define HAVE_PTE_SPECIAL 1
+#else
+# define HAVE_PTE_SPECIAL 0
+#endif
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte)
{
- unsigned long pfn = pte_pfn(pte);
+ unsigned long pfn;
- if (unlikely(vma->vm_flags & VM_PFNMAP)) {
- unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
- if (pfn == vma->vm_pgoff + off)
- return NULL;
- if (!is_cow_mapping(vma->vm_flags))
- return NULL;
+ if (HAVE_PTE_SPECIAL) {
+ if (likely(!pte_special(pte))) {
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ return pte_page(pte);
+ }
+ VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+ return NULL;
}
-#ifdef CONFIG_DEBUG_VM
- /*
- * Add some anal sanity checks for now. Eventually,
- * we should just do "return pfn_to_page(pfn)", but
- * in the meantime we check that we get a valid pfn,
- * and that the resulting page looks ok.
- */
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, pte, addr);
- return NULL;
+ /* !HAVE_PTE_SPECIAL case follows: */
+
+ pfn = pte_pfn(pte);
+
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
}
-#endif
+
+ VM_BUG_ON(!pfn_valid(pfn));
/*
- * NOTE! We still have PageReserved() pages in the page
- * tables.
+ * NOTE! We still have PageReserved() pages in the page tables.
*
- * The PAGE_ZERO() pages and various VDSO mappings can
- * cause them to exist.
+ * eg. VDSO mappings can cause them to exist.
*/
+out:
return pfn_to_page(pfn);
}
@@ -1057,8 +1093,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (pages)
foll_flags |= FOLL_GET;
if (!write && !(vma->vm_flags & VM_LOCKED) &&
- (!vma->vm_ops || (!vma->vm_ops->nopage &&
- !vma->vm_ops->fault)))
+ (!vma->vm_ops || !vma->vm_ops->fault))
foll_flags |= FOLL_ANON;
do {
@@ -1141,8 +1176,10 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page, pgprot_t prot)
{
+ struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;
@@ -1202,40 +1239,26 @@ out:
*
* The page does not need to be reserved.
*/
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!page_count(page))
return -EINVAL;
vma->vm_flags |= VM_INSERTPAGE;
- return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+ return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
-/**
- * vm_insert_pfn - insert single pfn into user vma
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- *
- * Similar to vm_inert_page, this allows drivers to insert individual pages
- * they've allocated into a user vma. Same comments apply.
- *
- * This function should only be called from a vm_ops->fault handler, and
- * in that case the handler should return NULL.
- */
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn)
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte, entry;
spinlock_t *ptl;
- BUG_ON(!(vma->vm_flags & VM_PFNMAP));
- BUG_ON(is_cow_mapping(vma->vm_flags));
-
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
@@ -1245,19 +1268,74 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
goto out_unlock;
/* Ok, finally just insert the thing.. */
- entry = pfn_pte(pfn, vma->vm_page_prot);
+ entry = pte_mkspecial(pfn_pte(pfn, prot));
set_pte_at(mm, addr, pte, entry);
- update_mmu_cache(vma, addr, entry);
+ update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);
-
out:
return retval;
}
+
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+{
+ /*
+ * Technically, architectures with pte_special can avoid all these
+ * restrictions (same for remap_pfn_range). However we would like
+ * consistency in testing and feature parity among all, so we should
+ * try to keep these invariants in place for everybody.
+ */
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+ (VM_PFNMAP|VM_MIXEDMAP));
+ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+ BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
EXPORT_SYMBOL(vm_insert_pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+{
+ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+
+ /*
+ * If we don't have pte special, then we have to use the pfn_valid()
+ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+ * refcount the page if pfn_valid is true (hence insert_page rather
+ * than insert_pfn).
+ */
+ if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+ struct page *page;
+
+ page = pfn_to_page(pfn);
+ return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
/*
* maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results
@@ -1276,7 +1354,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
- set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
+ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
@@ -2199,20 +2277,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
BUG_ON(vma->vm_flags & VM_PFNMAP);
- if (likely(vma->vm_ops->fault)) {
- ret = vma->vm_ops->fault(vma, &vmf);
- if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
- return ret;
- } else {
- /* Legacy ->nopage path */
- ret = 0;
- vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
- /* no page was available -- either SIGBUS or OOM */
- if (unlikely(vmf.page == NOPAGE_SIGBUS))
- return VM_FAULT_SIGBUS;
- else if (unlikely(vmf.page == NOPAGE_OOM))
- return VM_FAULT_OOM;
- }
+ ret = vma->vm_ops->fault(vma, &vmf);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+ return ret;
/*
* For consistency in subsequent calls, make the faulted page always
@@ -2377,10 +2444,13 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long pfn;
pte_unmap(page_table);
- BUG_ON(!(vma->vm_flags & VM_PFNMAP));
- BUG_ON(is_cow_mapping(vma->vm_flags));
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+
+ BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
if (unlikely(pfn == NOPFN_OOM))
return VM_FAULT_OOM;
else if (unlikely(pfn == NOPFN_SIGBUS))
@@ -2458,7 +2528,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
- if (vma->vm_ops->fault || vma->vm_ops->nopage)
+ if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, write_access, entry);
if (unlikely(vma->vm_ops->nopfn))
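
For drivers, the intended pattern for the new VM_MIXEDMAP interface mirrors the filemap_xip.c conversion above: declare the vma mixed at mmap time, then insert raw pfns from the fault handler and let vm_normal_page() decide, per pte, whether a struct page is involved. A hedged sketch follows; example_lookup_pfn and the example_* names are hypothetical, not from this patch:

extern unsigned long example_lookup_pfn(struct vm_area_struct *vma,
					pgoff_t pgoff);

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = example_lookup_pfn(vma, vmf->pgoff);
	int err;

	err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	BUG_ON(err);		/* anything else would be a driver bug */
	return VM_FAULT_NOPAGE;
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
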
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0fb330271271..c4ba85c8cb00 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -58,8 +58,105 @@ static void release_memory_resource(struct resource *res)
return;
}
-
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+{
+ atomic_set(&page->_mapcount, magic);
+ SetPagePrivate(page);
+ set_page_private(page, info);
+ atomic_inc(&page->_count);
+}
+
+void put_page_bootmem(struct page *page)
+{
+ int magic;
+
+ magic = atomic_read(&page->_mapcount);
+ BUG_ON(magic >= -1);
+
+ if (atomic_dec_return(&page->_count) == 1) {
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ reset_page_mapcount(page);
+ __free_pages_bootmem(page, 0);
+ }
+
+}
+
+void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+ unsigned long *usemap, mapsize, section_nr, i;
+ struct mem_section *ms;
+ struct page *page, *memmap;
+
+ if (!pfn_valid(start_pfn))
+ return;
+
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+
+ /* Get section's memmap address */
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+ /*
+ * Get page for the memmap's phys address
+ * XXX: need more consideration for sparse_vmemmap...
+ */
+ page = virt_to_page(memmap);
+ mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+ mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+ /* remember memmap's page */
+ for (i = 0; i < mapsize; i++, page++)
+ get_page_bootmem(section_nr, page, SECTION_INFO);
+
+ usemap = __nr_to_section(section_nr)->pageblock_flags;
+ page = virt_to_page(usemap);
+
+ mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+ for (i = 0; i < mapsize; i++, page++)
+ get_page_bootmem(section_nr, page, MIX_INFO);
+
+}
+
+void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+ unsigned long i, pfn, end_pfn, nr_pages;
+ int node = pgdat->node_id;
+ struct page *page;
+ struct zone *zone;
+
+ nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+ page = virt_to_page(pgdat);
+
+ for (i = 0; i < nr_pages; i++, page++)
+ get_page_bootmem(node, page, NODE_INFO);
+
+ zone = &pgdat->node_zones[0];
+ for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
+ if (zone->wait_table) {
+ nr_pages = zone->wait_table_hash_nr_entries
+ * sizeof(wait_queue_head_t);
+ nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
+ page = virt_to_page(zone->wait_table);
+
+ for (i = 0; i < nr_pages; i++, page++)
+ get_page_bootmem(node, page, NODE_INFO);
+ }
+ }
+
+ pfn = pgdat->node_start_pfn;
+ end_pfn = pfn + pgdat->node_spanned_pages;
+
+ /* register_section info */
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+ register_page_bootmem_info_section(pfn);
+
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
struct pglist_data *pgdat = zone->zone_pgdat;
@@ -101,6 +198,36 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
return register_new_memory(__pfn_to_section(phys_start_pfn));
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+ /*
+ * XXX: Freeing memmap with vmemmap is not implemented yet.
+ * This should be removed later.
+ */
+ return -EBUSY;
+}
+#else
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+ unsigned long flags;
+ struct pglist_data *pgdat = zone->zone_pgdat;
+ int ret = -EINVAL;
+
+ if (!valid_section(ms))
+ return ret;
+
+ ret = unregister_memory_section(ms);
+ if (ret)
+ return ret;
+
+ pgdat_resize_lock(pgdat, &flags);
+ sparse_remove_one_section(zone, ms);
+ pgdat_resize_unlock(pgdat, &flags);
+ return 0;
+}
+#endif
+
/*
* Reasonably generic function for adding memory. It is
* expected that archs that support memory hotplug will
@@ -134,6 +261,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
}
EXPORT_SYMBOL_GPL(__add_pages);
+/**
+ * __remove_pages() - remove sections of pages from a zone
+ * @zone: zone from which pages need to be removed
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ *
+ * Generic helper function to remove section mappings and sysfs entries
+ * for the section of the memory we are removing. Caller needs to make
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */
+int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long i, ret = 0;
+ int sections_to_remove;
+
+ /*
+ * We can only remove entire sections
+ */
+ BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
+ BUG_ON(nr_pages % PAGES_PER_SECTION);
+
+ release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
+ sections_to_remove = nr_pages / PAGES_PER_SECTION;
+ for (i = 0; i < sections_to_remove; i++) {
+ unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+ ret = __remove_section(zone, __pfn_to_section(pfn));
+ if (ret)
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__remove_pages);
+
static void grow_zone_span(struct zone *zone,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -164,6 +327,25 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
pgdat->node_start_pfn;
}
+void online_page(struct page *page)
+{
+ totalram_pages++;
+ num_physpages++;
+
+#ifdef CONFIG_HIGHMEM
+ if (PageHighMem(page))
+ totalhigh_pages++;
+#endif
+
+#ifdef CONFIG_FLATMEM
+ max_mapnr = max(page_to_pfn(page), max_mapnr);
+#endif
+
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+}
+
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
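
__remove_pages() is the section-removal counterpart of __add_pages(); an architecture's removal path is expected to offline the range first and then call it. Roughly, under those assumptions (hypothetical helper, mirroring what an arch_remove_memory() implementation might look like):

static int example_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * The range must already be offline (offline_pages()) and
	 * section-aligned, per the BUG_ONs in __remove_pages().
	 */
	return __remove_pages(zone, start_pfn, nr_pages);
}
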
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3c3601121509..a37a5034f63d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -63,7 +63,6 @@
grows down?
make bind policy root only? It can trigger oom much faster and the
kernel is not always grateful with that.
- could replace all the switch()es with a mempolicy_ops structure.
*/
#include <linux/mempolicy.h>
@@ -89,6 +88,7 @@
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/ctype.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -105,142 +105,264 @@ static struct kmem_cache *sn_cache;
policied. */
enum zone_type policy_zone = 0;
+/*
+ * run-time system-wide default policy => local allocation
+ */
struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
- .policy = MPOL_DEFAULT,
+ .mode = MPOL_PREFERRED,
+ .flags = MPOL_F_LOCAL,
};
-static void mpol_rebind_policy(struct mempolicy *pol,
- const nodemask_t *newmask);
+static const struct mempolicy_operations {
+ int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
+ void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
+} mpol_ops[MPOL_MAX];
-/* Do sanity checking on a policy */
-static int mpol_check_policy(int mode, nodemask_t *nodes)
+/* Check that the nodemask contains at least one populated zone */
+static int is_valid_nodemask(const nodemask_t *nodemask)
{
- int was_empty, is_empty;
+ int nd, k;
- if (!nodes)
- return 0;
+ /* Check that there is something useful in this mask */
+ k = policy_zone;
- /*
- * "Contextualize" the in-coming nodemast for cpusets:
- * Remember whether in-coming nodemask was empty, If not,
- * restrict the nodes to the allowed nodes in the cpuset.
- * This is guaranteed to be a subset of nodes with memory.
- */
- cpuset_update_task_memory_state();
- is_empty = was_empty = nodes_empty(*nodes);
- if (!was_empty) {
- nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
- is_empty = nodes_empty(*nodes); /* after "contextualization" */
- }
+ for_each_node_mask(nd, *nodemask) {
+ struct zone *z;
- switch (mode) {
- case MPOL_DEFAULT:
- /*
- * require caller to specify an empty nodemask
- * before "contextualization"
- */
- if (!was_empty)
- return -EINVAL;
- break;
- case MPOL_BIND:
- case MPOL_INTERLEAVE:
- /*
- * require at least 1 valid node after "contextualization"
- */
- if (is_empty)
- return -EINVAL;
- break;
- case MPOL_PREFERRED:
- /*
- * Did caller specify invalid nodes?
- * Don't silently accept this as "local allocation".
- */
- if (!was_empty && is_empty)
- return -EINVAL;
- break;
+ for (k = 0; k <= policy_zone; k++) {
+ z = &NODE_DATA(nd)->node_zones[k];
+ if (z->present_pages > 0)
+ return 1;
+ }
}
+
return 0;
}
-/* Generate a custom zonelist for the BIND policy. */
-static struct zonelist *bind_zonelist(nodemask_t *nodes)
+static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
- struct zonelist *zl;
- int num, max, nd;
- enum zone_type k;
+ return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
+}
- max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
- max++; /* space for zlcache_ptr (see mmzone.h) */
- zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
- if (!zl)
- return ERR_PTR(-ENOMEM);
- zl->zlcache_ptr = NULL;
- num = 0;
- /* First put in the highest zones from all nodes, then all the next
- lower zones etc. Avoid empty zones because the memory allocator
- doesn't like them. If you implement node hot removal you
- have to fix that. */
- k = MAX_NR_ZONES - 1;
- while (1) {
- for_each_node_mask(nd, *nodes) {
- struct zone *z = &NODE_DATA(nd)->node_zones[k];
- if (z->present_pages > 0)
- zl->zones[num++] = z;
- }
- if (k == 0)
- break;
- k--;
- }
- if (num == 0) {
- kfree(zl);
- return ERR_PTR(-EINVAL);
- }
- zl->zones[num] = NULL;
- return zl;
+static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
+ const nodemask_t *rel)
+{
+ nodemask_t tmp;
+ nodes_fold(tmp, *orig, nodes_weight(*rel));
+ nodes_onto(*ret, tmp, *rel);
+}
+
+static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
+{
+ if (nodes_empty(*nodes))
+ return -EINVAL;
+ pol->v.nodes = *nodes;
+ return 0;
+}
+
+static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
+{
+ if (!nodes)
+ pol->flags |= MPOL_F_LOCAL; /* local allocation */
+ else if (nodes_empty(*nodes))
+ return -EINVAL; /* no allowed nodes */
+ else
+ pol->v.preferred_node = first_node(*nodes);
+ return 0;
+}
+
+static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
+{
+ if (!is_valid_nodemask(nodes))
+ return -EINVAL;
+ pol->v.nodes = *nodes;
+ return 0;
}
/* Create a new policy */
-static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
+static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
+ nodemask_t *nodes)
{
struct mempolicy *policy;
+ nodemask_t cpuset_context_nmask;
+ int ret;
- pr_debug("setting mode %d nodes[0] %lx\n",
- mode, nodes ? nodes_addr(*nodes)[0] : -1);
+ pr_debug("setting mode %d flags %d nodes[0] %lx\n",
+ mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
- if (mode == MPOL_DEFAULT)
- return NULL;
+ if (mode == MPOL_DEFAULT) {
+ if (nodes && !nodes_empty(*nodes))
+ return ERR_PTR(-EINVAL);
+ return NULL; /* simply delete any existing policy */
+ }
+ VM_BUG_ON(!nodes);
+
+ /*
+ * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
+ * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
+ * All other modes require a valid pointer to a non-empty nodemask.
+ */
+ if (mode == MPOL_PREFERRED) {
+ if (nodes_empty(*nodes)) {
+ if (((flags & MPOL_F_STATIC_NODES) ||
+ (flags & MPOL_F_RELATIVE_NODES)))
+ return ERR_PTR(-EINVAL);
+ nodes = NULL; /* flag local alloc */
+ }
+ } else if (nodes_empty(*nodes))
+ return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!policy)
return ERR_PTR(-ENOMEM);
atomic_set(&policy->refcnt, 1);
- switch (mode) {
- case MPOL_INTERLEAVE:
- policy->v.nodes = *nodes;
- if (nodes_weight(policy->v.nodes) == 0) {
- kmem_cache_free(policy_cache, policy);
- return ERR_PTR(-EINVAL);
- }
- break;
- case MPOL_PREFERRED:
- policy->v.preferred_node = first_node(*nodes);
- if (policy->v.preferred_node >= MAX_NUMNODES)
- policy->v.preferred_node = -1;
- break;
- case MPOL_BIND:
- policy->v.zonelist = bind_zonelist(nodes);
- if (IS_ERR(policy->v.zonelist)) {
- void *error_code = policy->v.zonelist;
- kmem_cache_free(policy_cache, policy);
- return error_code;
- }
- break;
+ policy->mode = mode;
+ policy->flags = flags;
+
+ if (nodes) {
+ /*
+ * cpuset related setup doesn't apply to local allocation
+ */
+ cpuset_update_task_memory_state();
+ if (flags & MPOL_F_RELATIVE_NODES)
+ mpol_relative_nodemask(&cpuset_context_nmask, nodes,
+ &cpuset_current_mems_allowed);
+ else
+ nodes_and(cpuset_context_nmask, *nodes,
+ cpuset_current_mems_allowed);
+ if (mpol_store_user_nodemask(policy))
+ policy->w.user_nodemask = *nodes;
+ else
+ policy->w.cpuset_mems_allowed =
+ cpuset_mems_allowed(current);
+ }
+
+ ret = mpol_ops[mode].create(policy,
+ nodes ? &cpuset_context_nmask : NULL);
+ if (ret < 0) {
+ kmem_cache_free(policy_cache, policy);
+ return ERR_PTR(ret);
}
- policy->policy = mode;
- policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
return policy;
}
+/* Slow path of a mpol destructor. */
+void __mpol_put(struct mempolicy *p)
+{
+ if (!atomic_dec_and_test(&p->refcnt))
+ return;
+ kmem_cache_free(policy_cache, p);
+}
+
+static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
+{
+}
+
+static void mpol_rebind_nodemask(struct mempolicy *pol,
+ const nodemask_t *nodes)
+{
+ nodemask_t tmp;
+
+ if (pol->flags & MPOL_F_STATIC_NODES)
+ nodes_and(tmp, pol->w.user_nodemask, *nodes);
+ else if (pol->flags & MPOL_F_RELATIVE_NODES)
+ mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+ else {
+ nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
+ *nodes);
+ pol->w.cpuset_mems_allowed = *nodes;
+ }
+
+ pol->v.nodes = tmp;
+ if (!node_isset(current->il_next, tmp)) {
+ current->il_next = next_node(current->il_next, tmp);
+ if (current->il_next >= MAX_NUMNODES)
+ current->il_next = first_node(tmp);
+ if (current->il_next >= MAX_NUMNODES)
+ current->il_next = numa_node_id();
+ }
+}
+
+static void mpol_rebind_preferred(struct mempolicy *pol,
+ const nodemask_t *nodes)
+{
+ nodemask_t tmp;
+
+ if (pol->flags & MPOL_F_STATIC_NODES) {
+ int node = first_node(pol->w.user_nodemask);
+
+ if (node_isset(node, *nodes)) {
+ pol->v.preferred_node = node;
+ pol->flags &= ~MPOL_F_LOCAL;
+ } else
+ pol->flags |= MPOL_F_LOCAL;
+ } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
+ mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+ pol->v.preferred_node = first_node(tmp);
+ } else if (!(pol->flags & MPOL_F_LOCAL)) {
+ pol->v.preferred_node = node_remap(pol->v.preferred_node,
+ pol->w.cpuset_mems_allowed,
+ *nodes);
+ pol->w.cpuset_mems_allowed = *nodes;
+ }
+}
+
+/* Migrate a policy to a different set of nodes */
+static void mpol_rebind_policy(struct mempolicy *pol,
+ const nodemask_t *newmask)
+{
+ if (!pol)
+ return;
+ if (!mpol_store_user_nodemask(pol) &&
+ nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+ return;
+ mpol_ops[pol->mode].rebind(pol, newmask);
+}
+
+/*
+ * Wrapper for mpol_rebind_policy() that just requires task
+ * pointer, and updates task mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+ mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
+ *
+ * Call holding a reference to mm. Takes mm->mmap_sem during call.
+ */
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+ struct vm_area_struct *vma;
+
+ down_write(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ mpol_rebind_policy(vma->vm_policy, new);
+ up_write(&mm->mmap_sem);
+}
+
+static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
+ [MPOL_DEFAULT] = {
+ .rebind = mpol_rebind_default,
+ },
+ [MPOL_INTERLEAVE] = {
+ .create = mpol_new_interleave,
+ .rebind = mpol_rebind_nodemask,
+ },
+ [MPOL_PREFERRED] = {
+ .create = mpol_new_preferred,
+ .rebind = mpol_rebind_preferred,
+ },
+ [MPOL_BIND] = {
+ .create = mpol_new_bind,
+ .rebind = mpol_rebind_nodemask,
+ },
+};
+
static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
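
To make the MPOL_F_RELATIVE_NODES semantics concrete, here is a worked example of mpol_relative_nodemask() as defined above, assuming the user asked for relative nodes {0,1} while the cpuset currently allows nodes {4,5,6}:

static void example_relative_remap(void)
{
	nodemask_t user, allowed, effective;

	nodes_clear(user);		/* user-supplied mask {0,1} */
	node_set(0, user);
	node_set(1, user);

	nodes_clear(allowed);		/* cpuset mems_allowed {4,5,6} */
	node_set(4, allowed);
	node_set(5, allowed);
	node_set(6, allowed);

	mpol_relative_nodemask(&effective, &user, &allowed);
	/*
	 * nodes_fold() wraps user to the weight of allowed (3), then
	 * nodes_onto() maps bit n to the n-th allowed node, so effective
	 * is now {4,5} and will be remapped again whenever the cpuset's
	 * mems_allowed changes.
	 */
}
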
@@ -421,7 +543,7 @@ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
if (!err) {
mpol_get(new);
vma->vm_policy = new;
- mpol_free(old);
+ mpol_put(old);
}
return err;
}
@@ -479,46 +601,55 @@ static void mpol_set_task_struct_flag(void)
}
/* Set the process memory policy */
-static long do_set_mempolicy(int mode, nodemask_t *nodes)
+static long do_set_mempolicy(unsigned short mode, unsigned short flags,
+ nodemask_t *nodes)
{
struct mempolicy *new;
+ struct mm_struct *mm = current->mm;
- if (mpol_check_policy(mode, nodes))
- return -EINVAL;
- new = mpol_new(mode, nodes);
+ new = mpol_new(mode, flags, nodes);
if (IS_ERR(new))
return PTR_ERR(new);
- mpol_free(current->mempolicy);
+
+ /*
+ * prevent changing our mempolicy while show_numa_maps()
+ * is using it.
+ * Note: do_set_mempolicy() can be called at init time
+ * with no 'mm'.
+ */
+ if (mm)
+ down_write(&mm->mmap_sem);
+ mpol_put(current->mempolicy);
current->mempolicy = new;
mpol_set_task_struct_flag();
- if (new && new->policy == MPOL_INTERLEAVE)
+ if (new && new->mode == MPOL_INTERLEAVE &&
+ nodes_weight(new->v.nodes))
current->il_next = first_node(new->v.nodes);
+ if (mm)
+ up_write(&mm->mmap_sem);
+
return 0;
}
-/* Fill a zone bitmap for a policy */
-static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
+/*
+ * Return nodemask for policy for get_mempolicy() query
+ */
+static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
- int i;
-
nodes_clear(*nodes);
- switch (p->policy) {
+ if (p == &default_policy)
+ return;
+
+ switch (p->mode) {
case MPOL_BIND:
- for (i = 0; p->v.zonelist->zones[i]; i++)
- node_set(zone_to_nid(p->v.zonelist->zones[i]),
- *nodes);
- break;
- case MPOL_DEFAULT:
- break;
+ /* Fall through */
case MPOL_INTERLEAVE:
*nodes = p->v.nodes;
break;
case MPOL_PREFERRED:
- /* or use current node instead of memory_map? */
- if (p->v.preferred_node < 0)
- *nodes = node_states[N_HIGH_MEMORY];
- else
+ if (!(p->flags & MPOL_F_LOCAL))
node_set(p->v.preferred_node, *nodes);
+ /* else return empty node mask for local allocation */
break;
default:
BUG();
@@ -561,6 +692,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
}
if (flags & MPOL_F_ADDR) {
+ /*
+ * Do NOT fall back to task policy if the
+ * vma/shared policy at addr is NULL. We
+ * want to return MPOL_DEFAULT in this case.
+ */
down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
@@ -575,7 +711,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
return -EINVAL;
if (!pol)
- pol = &default_policy;
+ pol = &default_policy; /* indicates default behavior */
if (flags & MPOL_F_NODE) {
if (flags & MPOL_F_ADDR) {
@@ -584,14 +720,17 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
goto out;
*policy = err;
} else if (pol == current->mempolicy &&
- pol->policy == MPOL_INTERLEAVE) {
+ pol->mode == MPOL_INTERLEAVE) {
*policy = current->il_next;
} else {
err = -EINVAL;
goto out;
}
- } else
- *policy = pol->policy;
+ } else {
+ *policy = pol == &default_policy ? MPOL_DEFAULT :
+ pol->mode;
+ *policy |= pol->flags;
+ }
if (vma) {
up_read(&current->mm->mmap_sem);
@@ -600,9 +739,10 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
err = 0;
if (nmask)
- get_zonemask(pol, nmask);
+ get_policy_nodemask(pol, nmask);
out:
+ mpol_cond_put(pol);
if (vma)
up_read(&current->mm->mmap_sem);
return err;
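
On the query side, the rewritten do_get_mempolicy() folds the optional mode flags back into *policy, so userspace reads back exactly what it installed. A sketch of the caller's decoding, assuming the MPOL_F_* and MPOL_MODE_FLAGS definitions are visible to userspace:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	int mode;
	unsigned long mask = 0;

	if (get_mempolicy(&mode, &mask, sizeof(mask) * 8, NULL, 0) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("mode %d flags %d mask %#lx\n",
	       mode & ~MPOL_MODE_FLAGS, mode & MPOL_MODE_FLAGS, mask);
	return 0;
}
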
@@ -664,7 +804,7 @@ int do_migrate_pages(struct mm_struct *mm,
int err = 0;
nodemask_t tmp;
- down_read(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
err = migrate_vmas(mm, from_nodes, to_nodes, flags);
if (err)
@@ -781,8 +921,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
#endif
static long do_mbind(unsigned long start, unsigned long len,
- unsigned long mode, nodemask_t *nmask,
- unsigned long flags)
+ unsigned short mode, unsigned short mode_flags,
+ nodemask_t *nmask, unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -791,9 +931,8 @@ static long do_mbind(unsigned long start, unsigned long len,
int err;
LIST_HEAD(pagelist);
- if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
- MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
- || mode > MPOL_MAX)
+ if (flags & ~(unsigned long)(MPOL_MF_STRICT |
+ MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
@@ -812,10 +951,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (end == start)
return 0;
- if (mpol_check_policy(mode, nmask))
- return -EINVAL;
-
- new = mpol_new(mode, nmask);
+ new = mpol_new(mode, mode_flags, nmask);
if (IS_ERR(new))
return PTR_ERR(new);
@@ -826,8 +962,9 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!new)
flags |= MPOL_MF_DISCONTIG_OK;
- pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
- mode, nmask ? nodes_addr(*nmask)[0] : -1);
+ pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
+ start, start + len, mode, mode_flags,
+ nmask ? nodes_addr(*nmask)[0] : -1);
down_write(&mm->mmap_sem);
vma = check_range(mm, start, end, nmask,
@@ -848,7 +985,7 @@ static long do_mbind(unsigned long start, unsigned long len,
}
up_write(&mm->mmap_sem);
- mpol_free(new);
+ mpol_put(new);
return err;
}
@@ -926,11 +1063,19 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
{
nodemask_t nodes;
int err;
+ unsigned short mode_flags;
+ mode_flags = mode & MPOL_MODE_FLAGS;
+ mode &= ~MPOL_MODE_FLAGS;
+ if (mode >= MPOL_MAX)
+ return -EINVAL;
+ if ((mode_flags & MPOL_F_STATIC_NODES) &&
+ (mode_flags & MPOL_F_RELATIVE_NODES))
+ return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_mbind(start, len, mode, &nodes, flags);
+ return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
/* Set the process memory policy */
@@ -939,13 +1084,18 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
{
int err;
nodemask_t nodes;
+ unsigned short flags;
- if (mode < 0 || mode > MPOL_MAX)
+ flags = mode & MPOL_MODE_FLAGS;
+ mode &= ~MPOL_MODE_FLAGS;
+ if ((unsigned int)mode >= MPOL_MAX)
+ return -EINVAL;
+ if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
return -EINVAL;
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
- return do_set_mempolicy(mode, &nodes);
+ return do_set_mempolicy(mode, flags, &nodes);
}
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
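
From userspace, the new mode flags are simply OR-ed into the mode argument of set_mempolicy()/mbind() and decoded as shown above. A minimal example, assuming MPOL_F_RELATIVE_NODES is visible in the installed headers:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	/* interleave over the 1st and 2nd nodes of the enclosing cpuset */
	unsigned long mask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_RELATIVE_NODES,
			  &mask, sizeof(mask) * 8) != 0) {
		perror("set_mempolicy");
		return 1;
	}
	return 0;
}
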
@@ -1131,59 +1281,75 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
*
* Returns effective policy for a VMA at specified address.
* Falls back to @task or system default policy, as necessary.
- * Returned policy has extra reference count if shared, vma,
- * or some other task's policy [show_numa_maps() can pass
- * @task != current]. It is the caller's responsibility to
- * free the reference in these cases.
+ * Current or other task's task mempolicy and non-shared vma policies
+ * are protected by the task's mmap_sem, which must be held for read by
+ * the caller.
+ * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
+ * count--added by the get_policy() vm_op, as appropriate--to protect against
+ * freeing by another task. It is the caller's responsibility to free the
+ * extra reference for shared policies.
*/
-static struct mempolicy * get_vma_policy(struct task_struct *task,
+static struct mempolicy *get_vma_policy(struct task_struct *task,
struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = task->mempolicy;
- int shared_pol = 0;
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy) {
- pol = vma->vm_ops->get_policy(vma, addr);
- shared_pol = 1; /* if pol non-NULL, add ref below */
- } else if (vma->vm_policy &&
- vma->vm_policy->policy != MPOL_DEFAULT)
+ struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
+ addr);
+ if (vpol)
+ pol = vpol;
+ } else if (vma->vm_policy)
pol = vma->vm_policy;
}
if (!pol)
pol = &default_policy;
- else if (!shared_pol && pol != current->mempolicy)
- mpol_get(pol); /* vma or other task's policy */
return pol;
}
-/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
+/*
+ * Return a nodemask representing a mempolicy for filtering nodes for
+ * page allocation
+ */
+static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
- int nd;
+ /* Lower zones don't get a nodemask applied for MPOL_BIND */
+ if (unlikely(policy->mode == MPOL_BIND) &&
+ gfp_zone(gfp) >= policy_zone &&
+ cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
+ return &policy->v.nodes;
- switch (policy->policy) {
+ return NULL;
+}
+
+/* Return a zonelist indicated by gfp for node representing a mempolicy */
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+{
+ int nd = numa_node_id();
+
+ switch (policy->mode) {
case MPOL_PREFERRED:
- nd = policy->v.preferred_node;
- if (nd < 0)
- nd = numa_node_id();
+ if (!(policy->flags & MPOL_F_LOCAL))
+ nd = policy->v.preferred_node;
break;
case MPOL_BIND:
- /* Lower zones don't get a policy applied */
- /* Careful: current->mems_allowed might have moved */
- if (gfp_zone(gfp) >= policy_zone)
- if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
- return policy->v.zonelist;
- /*FALL THROUGH*/
+ /*
+ * Normally, MPOL_BIND allocations are node-local within the
+ * allowed nodemask. However, if __GFP_THISNODE is set and the
+ * current node is part of the mask, we use the zonelist for
+ * the first node in the mask instead.
+ */
+ if (unlikely(gfp & __GFP_THISNODE) &&
+ unlikely(!node_isset(nd, policy->v.nodes)))
+ nd = first_node(policy->v.nodes);
+ break;
case MPOL_INTERLEAVE: /* should not happen */
- case MPOL_DEFAULT:
- nd = numa_node_id();
break;
default:
- nd = 0;
BUG();
}
- return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
+ return node_zonelist(nd, gfp);
}
/* Do dynamic interleaving for a process */
@@ -1196,36 +1362,51 @@ static unsigned interleave_nodes(struct mempolicy *policy)
next = next_node(nid, policy->v.nodes);
if (next >= MAX_NUMNODES)
next = first_node(policy->v.nodes);
- me->il_next = next;
+ if (next < MAX_NUMNODES)
+ me->il_next = next;
return nid;
}
/*
* Depending on the memory policy provide a node from which to allocate the
* next slab entry.
+ * @policy must be protected from freeing by the caller.  If @policy is
+ * the current task's mempolicy, this protection is implicit, as only the
+ * task can change its policy.  The system default policy requires no
+ * such protection.
*/
unsigned slab_node(struct mempolicy *policy)
{
- int pol = policy ? policy->policy : MPOL_DEFAULT;
+ if (!policy || policy->flags & MPOL_F_LOCAL)
+ return numa_node_id();
+
+ switch (policy->mode) {
+ case MPOL_PREFERRED:
+ /*
+ * handled MPOL_F_LOCAL above
+ */
+ return policy->v.preferred_node;
- switch (pol) {
case MPOL_INTERLEAVE:
return interleave_nodes(policy);
- case MPOL_BIND:
+ case MPOL_BIND: {
/*
* Follow bind policy behavior and start allocation at the
* first node.
*/
- return zone_to_nid(policy->v.zonelist->zones[0]);
-
- case MPOL_PREFERRED:
- if (policy->v.preferred_node >= 0)
- return policy->v.preferred_node;
- /* Fall through */
+ struct zonelist *zonelist;
+ struct zone *zone;
+ enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
+ zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
+ (void)first_zones_zonelist(zonelist, highest_zoneidx,
+ &policy->v.nodes,
+ &zone);
+ return zone->node;
+ }
default:
- return numa_node_id();
+ BUG();
}
}
@@ -1234,10 +1415,13 @@ static unsigned offset_il_node(struct mempolicy *pol,
struct vm_area_struct *vma, unsigned long off)
{
unsigned nnodes = nodes_weight(pol->v.nodes);
- unsigned target = (unsigned)off % nnodes;
+ unsigned target;
int c;
int nid = -1;
+ if (!nnodes)
+ return numa_node_id();
+ target = (unsigned int)off % nnodes;
c = 0;
do {
nid = next_node(nid, pol->v.nodes);
@@ -1274,40 +1458,30 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
* @vma = virtual memory area whose policy is sought
* @addr = address in @vma for shared policy lookup and interleave policy
* @gfp_flags = for requested zone
- * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
+ * @mpol = pointer to mempolicy pointer for reference counted mempolicy
+ * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
*
- * Returns a zonelist suitable for a huge page allocation.
- * If the effective policy is 'BIND, returns pointer to policy's zonelist.
- * If it is also a policy for which get_vma_policy() returns an extra
- * reference, we must hold that reference until after allocation.
- * In that case, return policy via @mpol so hugetlb allocation can drop
- * the reference. For non-'BIND referenced policies, we can/do drop the
- * reference here, so the caller doesn't need to know about the special case
- * for default and current task policy.
+ * Returns a zonelist suitable for a huge page allocation and a pointer
+ * to the struct mempolicy for conditional unref after allocation.
+ * If the effective policy is 'BIND, returns a pointer to the mempolicy's
+ * @nodemask for filtering the zonelist.
*/
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
- gfp_t gfp_flags, struct mempolicy **mpol)
+ gfp_t gfp_flags, struct mempolicy **mpol,
+ nodemask_t **nodemask)
{
- struct mempolicy *pol = get_vma_policy(current, vma, addr);
struct zonelist *zl;
- *mpol = NULL; /* probably no unref needed */
- if (pol->policy == MPOL_INTERLEAVE) {
- unsigned nid;
-
- nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
- if (unlikely(pol != &default_policy &&
- pol != current->mempolicy))
- __mpol_free(pol); /* finished with pol */
- return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
- }
+ *mpol = get_vma_policy(current, vma, addr);
+ *nodemask = NULL; /* assume !MPOL_BIND */
- zl = zonelist_policy(GFP_HIGHUSER, pol);
- if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
- if (pol->policy != MPOL_BIND)
- __mpol_free(pol); /* finished with pol */
- else
- *mpol = pol; /* unref needed after allocation */
+ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
+ zl = node_zonelist(interleave_nid(*mpol, vma, addr,
+ HPAGE_SHIFT), gfp_flags);
+ } else {
+ zl = policy_zonelist(gfp_flags, *mpol);
+ if ((*mpol)->mode == MPOL_BIND)
+ *nodemask = &(*mpol)->v.nodes;
}
return zl;
}
@@ -1321,9 +1495,9 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
struct zonelist *zl;
struct page *page;
- zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
+ zl = node_zonelist(nid, gfp);
page = __alloc_pages(gfp, order, zl);
- if (page && page_zone(page) == zl->zones[0])
+ if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
return page;
}
@@ -1358,28 +1532,27 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
cpuset_update_task_memory_state();
- if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
+ if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
- if (unlikely(pol != &default_policy &&
- pol != current->mempolicy))
- __mpol_free(pol); /* finished with pol */
+ mpol_cond_put(pol);
return alloc_page_interleave(gfp, 0, nid);
}
- zl = zonelist_policy(gfp, pol);
- if (pol != &default_policy && pol != current->mempolicy) {
+ zl = policy_zonelist(gfp, pol);
+ if (unlikely(mpol_needs_cond_ref(pol))) {
/*
- * slow path: ref counted policy -- shared or vma
+ * slow path: ref counted shared policy
*/
- struct page *page = __alloc_pages(gfp, 0, zl);
- __mpol_free(pol);
+ struct page *page = __alloc_pages_nodemask(gfp, 0,
+ zl, policy_nodemask(gfp, pol));
+ __mpol_put(pol);
return page;
}
/*
* fast path: default or task policy
*/
- return __alloc_pages(gfp, 0, zl);
+ return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
}
/**
@@ -1409,22 +1582,28 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
cpuset_update_task_memory_state();
if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
pol = &default_policy;
- if (pol->policy == MPOL_INTERLEAVE)
+
+ /*
+ * No reference counting needed for current->mempolicy
+ * nor system default_policy
+ */
+ if (pol->mode == MPOL_INTERLEAVE)
return alloc_page_interleave(gfp, order, interleave_nodes(pol));
- return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
+ return __alloc_pages_nodemask(gfp, order,
+ policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
/*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
* rebinds the mempolicy its copying by calling mpol_rebind_policy()
* with the mems_allowed returned by cpuset_mems_allowed(). This
* keeps mempolicies cpuset relative after its cpuset moves. See
* further kernel/cpuset.c update_nodemask().
*/
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
{
struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -1436,55 +1615,64 @@ struct mempolicy *__mpol_copy(struct mempolicy *old)
}
*new = *old;
atomic_set(&new->refcnt, 1);
- if (new->policy == MPOL_BIND) {
- int sz = ksize(old->v.zonelist);
- new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
- if (!new->v.zonelist) {
- kmem_cache_free(policy_cache, new);
- return ERR_PTR(-ENOMEM);
- }
- }
return new;
}
+/*
+ * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
+ * eliminate the * MPOL_F_* flags that require conditional ref and
+ * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
+ * after return. Use the returned value.
+ *
+ * Allows use of a mempolicy for, e.g., multiple allocations with a single
+ * policy lookup, even if the policy needs/has extra ref on lookup.
+ * shmem_readahead needs this.
+ */
+struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol)
+{
+ if (!mpol_needs_cond_ref(frompol))
+ return frompol;
+
+ *tompol = *frompol;
+ tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
+ __mpol_put(frompol);
+ return tompol;
+}
+
+static int mpol_match_intent(const struct mempolicy *a,
+ const struct mempolicy *b)
+{
+ if (a->flags != b->flags)
+ return 0;
+ if (!mpol_store_user_nodemask(a))
+ return 1;
+ return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
+}
+
/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
return 0;
- if (a->policy != b->policy)
+ if (a->mode != b->mode)
return 0;
- switch (a->policy) {
- case MPOL_DEFAULT:
- return 1;
+ if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
+ return 0;
+ switch (a->mode) {
+ case MPOL_BIND:
+ /* Fall through */
case MPOL_INTERLEAVE:
return nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
- return a->v.preferred_node == b->v.preferred_node;
- case MPOL_BIND: {
- int i;
- for (i = 0; a->v.zonelist->zones[i]; i++)
- if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
- return 0;
- return b->v.zonelist->zones[i] == NULL;
- }
+ return a->v.preferred_node == b->v.preferred_node &&
+ a->flags == b->flags;
default:
BUG();
return 0;
}
}
-/* Slow path of a mpol destructor. */
-void __mpol_free(struct mempolicy *p)
-{
- if (!atomic_dec_and_test(&p->refcnt))
- return;
- if (p->policy == MPOL_BIND)
- kfree(p->v.zonelist);
- p->policy = MPOL_DEFAULT;
- kmem_cache_free(policy_cache, p);
-}
-
/*
* Shared memory backing store policy support.
*
@@ -1547,7 +1735,7 @@ static void sp_insert(struct shared_policy *sp, struct sp_node *new)
rb_link_node(&new->nd, parent, p);
rb_insert_color(&new->nd, &sp->root);
pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
- new->policy ? new->policy->policy : 0);
+ new->policy ? new->policy->mode : 0);
}
/* Find shared policy intersecting idx */
@@ -1573,7 +1761,7 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
- mpol_free(n->policy);
+ mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
@@ -1587,6 +1775,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
n->start = start;
n->end = end;
mpol_get(pol);
+ pol->flags |= MPOL_F_SHARED; /* for unref */
n->policy = pol;
return n;
}
@@ -1633,33 +1822,41 @@ restart:
sp_insert(sp, new);
spin_unlock(&sp->lock);
if (new2) {
- mpol_free(new2->policy);
+ mpol_put(new2->policy);
kmem_cache_free(sn_cache, new2);
}
return 0;
}
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
- nodemask_t *policy_nodes)
-{
- info->root = RB_ROOT;
- spin_lock_init(&info->lock);
-
- if (policy != MPOL_DEFAULT) {
- struct mempolicy *newpol;
-
- /* Falls back to MPOL_DEFAULT on any error */
- newpol = mpol_new(policy, policy_nodes);
- if (!IS_ERR(newpol)) {
- /* Create pseudo-vma that contains just the policy */
- struct vm_area_struct pvma;
-
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- /* Policy covers entire file */
- pvma.vm_end = TASK_SIZE;
- mpol_set_shared_policy(info, &pvma, newpol);
- mpol_free(newpol);
- }
+/**
+ * mpol_shared_policy_init - initialize shared policy for inode
+ * @sp: pointer to inode shared policy
+ * @mpol: struct mempolicy to install
+ *
+ * Install non-NULL @mpol in inode's shared policy rb-tree.
+ * On entry, the current task has a reference on a non-NULL @mpol.
+ * This must be released on exit.
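+ * (That reference is dropped below via mpol_put(), whether or not
+ * mpol_new() succeeds.)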
+ */
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
+{
+ sp->root = RB_ROOT; /* empty tree == default mempolicy */
+ spin_lock_init(&sp->lock);
+
+ if (mpol) {
+ struct vm_area_struct pvma;
+ struct mempolicy *new;
+
+ /* contextualize the tmpfs mount point mempolicy */
+ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
+ mpol_put(mpol); /* drop our ref on sb mpol */
+ if (IS_ERR(new))
+ return; /* no valid nodemask intersection */
+
+ /* Create pseudo-vma that contains just the policy */
+ memset(&pvma, 0, sizeof(struct vm_area_struct));
+ pvma.vm_end = TASK_SIZE; /* policy covers entire file */
+ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
+ mpol_put(new); /* drop initial ref */
}
}
@@ -1670,9 +1867,10 @@ int mpol_set_shared_policy(struct shared_policy *info,
struct sp_node *new = NULL;
unsigned long sz = vma_pages(vma);
- pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
+ pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
vma->vm_pgoff,
- sz, npol? npol->policy : -1,
+ sz, npol ? npol->mode : -1,
+ npol ? npol->flags : -1,
npol ? nodes_addr(npol->v.nodes)[0] : -1);
if (npol) {
@@ -1700,7 +1898,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
rb_erase(&n->nd, &p->root);
- mpol_free(n->policy);
+ mpol_put(n->policy);
kmem_cache_free(sn_cache, n);
}
spin_unlock(&p->lock);
@@ -1745,120 +1943,177 @@ void __init numa_policy_init(void)
if (unlikely(nodes_empty(interleave_nodes)))
node_set(prefer, interleave_nodes);
- if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
+ if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
printk("numa_policy_init: interleaving failed\n");
}
/* Reset policy of current process to default */
void numa_default_policy(void)
{
- do_set_mempolicy(MPOL_DEFAULT, NULL);
+ do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
- const nodemask_t *newmask)
-{
- nodemask_t *mpolmask;
- nodemask_t tmp;
+/*
+ * Parse and format mempolicy from/to strings
+ */
- if (!pol)
- return;
- mpolmask = &pol->cpuset_mems_allowed;
- if (nodes_equal(*mpolmask, *newmask))
- return;
+/*
+ * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
+ * Used only for mpol_parse_str() and mpol_to_str()
+ */
+#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
+static const char * const policy_types[] =
+ { "default", "prefer", "bind", "interleave", "local" };
- switch (pol->policy) {
- case MPOL_DEFAULT:
- break;
- case MPOL_INTERLEAVE:
- nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
- pol->v.nodes = tmp;
- *mpolmask = *newmask;
- current->il_next = node_remap(current->il_next,
- *mpolmask, *newmask);
- break;
- case MPOL_PREFERRED:
- pol->v.preferred_node = node_remap(pol->v.preferred_node,
- *mpolmask, *newmask);
- *mpolmask = *newmask;
- break;
- case MPOL_BIND: {
- nodemask_t nodes;
- struct zone **z;
- struct zonelist *zonelist;
+#ifdef CONFIG_TMPFS
+/**
+ * mpol_parse_str - parse string to mempolicy
+ * @str: string containing mempolicy to parse
+ * @mpol: pointer to struct mempolicy pointer, returned on success.
+ * @no_context: flag whether to "contextualize" the mempolicy
+ *
+ * Format of input:
+ * <mode>[=<flags>][:<nodelist>]
+ *
+ * If @no_context is true, save the input nodemask in w.user_nodemask in
+ * the returned mempolicy. This will be used to "clone" the mempolicy in
+ * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
+ * mount option. Note that if 'static' or 'relative' mode flags were
+ * specified, the input nodemask will already have been saved. Saving
+ * it again is redundant, but safe.
+ *
+ * On success, returns 0, else 1
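+ *
+ * Example inputs in this format (illustrative): "prefer:1",
+ * "interleave=relative:0,2", "bind:0-3" and "local".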
+ */
+int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+{
+ struct mempolicy *new = NULL;
+ unsigned short uninitialized_var(mode);
+ unsigned short uninitialized_var(mode_flags);
+ nodemask_t nodes;
+ char *nodelist = strchr(str, ':');
+ char *flags = strchr(str, '=');
+ int i;
+ int err = 1;
+
+ if (nodelist) {
+ /* NUL-terminate mode or flags string */
+ *nodelist++ = '\0';
+ if (nodelist_parse(nodelist, nodes))
+ goto out;
+ if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
+ goto out;
+ } else
nodes_clear(nodes);
- for (z = pol->v.zonelist->zones; *z; z++)
- node_set(zone_to_nid(*z), nodes);
- nodes_remap(tmp, nodes, *mpolmask, *newmask);
- nodes = tmp;
- zonelist = bind_zonelist(&nodes);
+ if (flags)
+ *flags++ = '\0'; /* terminate mode string */
- /* If no mem, then zonelist is NULL and we keep old zonelist.
- * If that old zonelist has no remaining mems_allowed nodes,
- * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
- */
+ for (i = 0; i <= MPOL_LOCAL; i++) {
+ if (!strcmp(str, policy_types[i])) {
+ mode = i;
+ break;
+ }
+ }
+ if (i > MPOL_LOCAL)
+ goto out;
- if (!IS_ERR(zonelist)) {
- /* Good - got mem - substitute new zonelist */
- kfree(pol->v.zonelist);
- pol->v.zonelist = zonelist;
+ switch (mode) {
+ case MPOL_PREFERRED:
+ /*
+ * Insist on a nodelist of one node only
+ */
+ if (nodelist) {
+ char *rest = nodelist;
+ while (isdigit(*rest))
+ rest++;
+ if (!*rest)
+ err = 0;
}
- *mpolmask = *newmask;
break;
- }
- default:
- BUG();
+ case MPOL_INTERLEAVE:
+ /*
+ * Default to online nodes with memory if no nodelist
+ */
+ if (!nodelist)
+ nodes = node_states[N_HIGH_MEMORY];
+ err = 0;
+ break;
+ case MPOL_LOCAL:
+ /*
+ * Don't allow a nodelist; mpol_new() checks flags
+ */
+ if (nodelist)
+ goto out;
+ mode = MPOL_PREFERRED;
break;
- }
-}
-
-/*
- * Wrapper for mpol_rebind_policy() that just requires task
- * pointer, and updates task mempolicy.
- */
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
-{
- mpol_rebind_policy(tsk->mempolicy, new);
-}
+ /*
+ * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
+ * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
+ */
+ }
-/*
- * Rebind each vma in mm to new nodemask.
- *
- * Call holding a reference to mm. Takes mm->mmap_sem during call.
- */
+ mode_flags = 0;
+ if (flags) {
+ /*
+ * Currently, we only support two mutually exclusive
+ * mode flags.
+ */
+ if (!strcmp(flags, "static"))
+ mode_flags |= MPOL_F_STATIC_NODES;
+ else if (!strcmp(flags, "relative"))
+ mode_flags |= MPOL_F_RELATIVE_NODES;
+ else
+ err = 1;
+ }
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-{
- struct vm_area_struct *vma;
+ new = mpol_new(mode, mode_flags, &nodes);
+ if (IS_ERR(new))
+ err = 1;
+ else if (no_context)
+ new->w.user_nodemask = nodes; /* save for contextualization */
- down_write(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- mpol_rebind_policy(vma->vm_policy, new);
- up_write(&mm->mmap_sem);
+out:
+ /* Restore string for error message */
+ if (nodelist)
+ *--nodelist = ':';
+ if (flags)
+ *--flags = '=';
+ if (!err)
+ *mpol = new;
+ return err;
}
+#endif /* CONFIG_TMPFS */
-/*
- * Display pages allocated per node and memory policy via /proc.
- */
-
-static const char * const policy_types[] =
- { "default", "prefer", "bind", "interleave" };
-
-/*
+/**
+ * mpol_to_str - format a mempolicy structure for printing
+ * @buffer: to contain formatted mempolicy string
+ * @maxlen: length of @buffer
+ * @pol: pointer to mempolicy to be formatted
+ * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
+ *
* Convert a mempolicy into a string.
* Returns the number of characters in buffer (if positive)
* or an error (negative)
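+ * (for example, an interleave policy over nodes 0-3 with the static
+ * flag formats as "interleave=static:0-3").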
*/
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
{
char *p = buffer;
int l;
nodemask_t nodes;
- int mode = pol ? pol->policy : MPOL_DEFAULT;
+ unsigned short mode;
+ unsigned short flags = pol ? pol->flags : 0;
+
+ /*
+ * Sanity check: room for longest mode, flag and some nodes
+ */
+ VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
+
+ if (!pol || pol == &default_policy)
+ mode = MPOL_DEFAULT;
+ else
+ mode = pol->mode;
switch (mode) {
case MPOL_DEFAULT:
@@ -1867,33 +2122,50 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
case MPOL_PREFERRED:
nodes_clear(nodes);
- node_set(pol->v.preferred_node, nodes);
+ if (flags & MPOL_F_LOCAL)
+ mode = MPOL_LOCAL; /* pseudo-policy */
+ else
+ node_set(pol->v.preferred_node, nodes);
break;
case MPOL_BIND:
- get_zonemask(pol, &nodes);
- break;
-
+ /* Fall through */
case MPOL_INTERLEAVE:
- nodes = pol->v.nodes;
+ if (no_context)
+ nodes = pol->w.user_nodemask;
+ else
+ nodes = pol->v.nodes;
break;
default:
BUG();
- return -EFAULT;
}
l = strlen(policy_types[mode]);
- if (buffer + maxlen < p + l + 1)
- return -ENOSPC;
+ if (buffer + maxlen < p + l + 1)
+ return -ENOSPC;
strcpy(p, policy_types[mode]);
p += l;
- if (!nodes_empty(nodes)) {
+ if (flags & MPOL_MODE_FLAGS) {
if (buffer + maxlen < p + 2)
return -ENOSPC;
*p++ = '=';
+
+ /*
+ * Currently, the only defined flags are mutually exclusive
+ */
+ if (flags & MPOL_F_STATIC_NODES)
+ p += snprintf(p, buffer + maxlen - p, "static");
+ else if (flags & MPOL_F_RELATIVE_NODES)
+ p += snprintf(p, buffer + maxlen - p, "relative");
+ }
+
+ if (!nodes_empty(nodes)) {
+ if (buffer + maxlen < p + 2)
+ return -ENOSPC;
+ *p++ = ':';
p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
}
return p - buffer;
@@ -1971,6 +2243,9 @@ static inline void check_huge_range(struct vm_area_struct *vma,
}
#endif
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
int show_numa_map(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
@@ -1990,12 +2265,8 @@ int show_numa_map(struct seq_file *m, void *v)
return 0;
pol = get_vma_policy(priv->task, vma, vma->vm_start);
- mpol_to_str(buffer, sizeof(buffer), pol);
- /*
- * unref shared or other task's mempolicy
- */
- if (pol != &default_policy && pol != current->mempolicy)
- __mpol_free(pol);
+ mpol_to_str(buffer, sizeof(buffer), pol, 0);
+ mpol_cond_put(pol);
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
diff --git a/mm/mincore.c b/mm/mincore.c
index 5efe0ded69b1..5178800bc129 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -33,7 +33,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
* When tmpfs swaps out a page from a file, any process mapping that
* file will not get a swp_entry_t in its pte, but rather it is like
* any other file mapping (ie. marked !present and faulted in with
- * tmpfs's .nopage). So swapped out tmpfs mappings are tested here.
+ * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
*
* However when tmpfs moves the page from pagecache and into swapcache,
* it is still in core, but the find_get_page below won't find it.
diff --git a/mm/mmap.c b/mm/mmap.c
index a32d28ce31cd..677d184b0d42 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
- mpol_free(vma_policy(vma));
+ mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
return next;
}
@@ -626,7 +626,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (file)
fput(file);
mm->map_count--;
- mpol_free(vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
@@ -1068,7 +1068,6 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
-
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff,
@@ -1181,22 +1180,20 @@ munmap_back:
if (vma_wants_writenotify(vma))
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
- if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+ if (file && vma_merge(mm, prev, addr, vma->vm_end,
vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
- file = vma->vm_file;
- vma_link(mm, vma, prev, rb_link, rb_parent);
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
- } else {
- if (file) {
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
- fput(file);
- }
- mpol_free(vma_policy(vma));
+ mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
+ fput(file);
+ } else {
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
}
-out:
+
+ /* Once vma denies write, undo our temporary denial count */
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+out:
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
@@ -1813,7 +1810,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
- pol = mpol_copy(vma_policy(vma));
+ pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new);
return PTR_ERR(pol);
@@ -2129,7 +2126,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (new_vma) {
*new_vma = *vma;
- pol = mpol_copy(vma_policy(vma));
+ pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new_vma);
return NULL;
diff --git a/mm/mmzone.c b/mm/mmzone.c
index eb5838634f18..486ed595ee6f 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -42,3 +42,33 @@ struct zone *next_zone(struct zone *zone)
return zone;
}
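+
+/* Does @zref's zone lie on a node in @nodes? (always true for !NUMA) */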
+static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
+{
+#ifdef CONFIG_NUMA
+ return node_isset(zonelist_node_idx(zref), *nodes);
+#else
+ return 1;
+#endif /* CONFIG_NUMA */
+}
+
+/* Returns the next zone at or below highest_zoneidx in a zonelist */
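+/* (normally reached via the for_each_zone_zonelist*() iterator macros) */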
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone)
+{
+ /*
+ * Find the next suitable zone to use for the allocation.
+ * Only filter based on nodemask if it's set
+ */
+ if (likely(nodes == NULL))
+ while (zonelist_zone_idx(z) > highest_zoneidx)
+ z++;
+ else
+ while (zonelist_zone_idx(z) > highest_zoneidx ||
+ (z->zone && !zref_in_nodemask(z, nodes)))
+ z++;
+
+ *zone = zonelist_zone(z++);
+ return z;
+}
diff --git a/mm/nommu.c b/mm/nommu.c
index 5d8ae086f74e..1d32fe89d57b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -105,7 +105,11 @@ unsigned int kobjsize(const void *objp)
{
struct page *page;
- if (!objp || !((page = virt_to_page(objp))))
+ /*
+	 * If the object should not have ksize() performed on it,
+	 * return a size of 0
+ */
+ if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
return 0;
if (PageSlab(page))
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index beb592fe9389..8a5467ee6265 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -53,8 +53,7 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
* of least surprise ... (be careful when you change it)
*/
-unsigned long badness(struct task_struct *p, unsigned long uptime,
- struct mem_cgroup *mem)
+unsigned long badness(struct task_struct *p, unsigned long uptime)
{
unsigned long points, cpu_time, run_time, s;
struct mm_struct *mm;
@@ -175,12 +174,14 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
- struct zone **z;
+ struct zone *zone;
+ struct zoneref *z;
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
nodemask_t nodes = node_states[N_HIGH_MEMORY];
- for (z = zonelist->zones; *z; z++)
- if (cpuset_zone_allowed_softwall(*z, gfp_mask))
- node_clear(zone_to_nid(*z), nodes);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+ if (cpuset_zone_allowed_softwall(zone, gfp_mask))
+ node_clear(zone_to_nid(zone), nodes);
else
return CONSTRAINT_CPUSET;
@@ -254,7 +255,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
if (p->oomkilladj == OOM_DISABLE)
continue;
- points = badness(p, uptime.tv_sec, mem);
+ points = badness(p, uptime.tv_sec);
if (points > *ppoints || !chosen) {
chosen = p;
*ppoints = points;
@@ -460,29 +461,29 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
* if a parallel OOM killing is already taking place that includes a zone in
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
*/
-int try_set_zone_oom(struct zonelist *zonelist)
+int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
- struct zone **z;
+ struct zoneref *z;
+ struct zone *zone;
int ret = 1;
- z = zonelist->zones;
-
spin_lock(&zone_scan_mutex);
- do {
- if (zone_is_oom_locked(*z)) {
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
- } while (*(++z) != NULL);
+ }
+
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ /*
+ * Lock each zone in the zonelist under zone_scan_mutex so a
+ * parallel invocation of try_set_zone_oom() doesn't succeed
+ * when it shouldn't.
+ */
+ zone_set_flag(zone, ZONE_OOM_LOCKED);
+ }
- /*
- * Lock each zone in the zonelist under zone_scan_mutex so a parallel
- * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
- */
- z = zonelist->zones;
- do {
- zone_set_flag(*z, ZONE_OOM_LOCKED);
- } while (*(++z) != NULL);
out:
spin_unlock(&zone_scan_mutex);
return ret;
@@ -493,16 +494,15 @@ out:
* allocation attempts with zonelists containing them may now recall the OOM
* killer, if necessary.
*/
-void clear_zonelist_oom(struct zonelist *zonelist)
+void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
- struct zone **z;
-
- z = zonelist->zones;
+ struct zoneref *z;
+ struct zone *zone;
spin_lock(&zone_scan_mutex);
- do {
- zone_clear_flag(*z, ZONE_OOM_LOCKED);
- } while (*(++z) != NULL);
+ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+ zone_clear_flag(zone, ZONE_OOM_LOCKED);
+ }
spin_unlock(&zone_scan_mutex);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 32e796af12a1..d1cf4f05dcda 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -546,7 +546,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
/*
* permit the bootmem allocator to evade page validation on high-order frees
*/
-void __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __free_pages_bootmem(struct page *page, unsigned int order)
{
if (order == 0) {
__ClearPageReserved(page);
@@ -632,7 +632,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
if (PageReserved(page))
return 1;
- page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
+ page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
1 << PG_referenced | 1 << PG_arch_1 |
1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
set_page_private(page, 0);
@@ -1050,7 +1050,7 @@ void split_page(struct page *page, unsigned int order)
* we cheat by calling it from here, in the order > 0 path. Saves a branch
* or two.
*/
-static struct page *buffered_rmqueue(struct zonelist *zonelist,
+static struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, int order, gfp_t gfp_flags)
{
unsigned long flags;
@@ -1102,7 +1102,7 @@ again:
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
- zone_statistics(zonelist, zone);
+ zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
put_cpu();
@@ -1284,7 +1284,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
if (!zlc)
return NULL;
- if (time_after(jiffies, zlc->last_full_zap + HZ)) {
+ if (time_after(jiffies, zlc->last_full_zap + HZ)) {
bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
zlc->last_full_zap = jiffies;
}
@@ -1317,7 +1317,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
* We are low on memory in the second scan, and should leave no stone
* unturned looking for a free page.
*/
-static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
@@ -1328,7 +1328,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
if (!zlc)
return 1;
- i = z - zonelist->zones;
+ i = z - zonelist->_zonerefs;
n = zlc->z_to_n[i];
/* This zone is worth trying if it is allowed but not full */
@@ -1340,7 +1340,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
* zlc->fullzones, so that subsequent attempts to allocate a page
* from that zone don't waste time re-examining it.
*/
-static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
struct zonelist_cache *zlc; /* cached zonelist speedup info */
int i; /* index of *z in zonelist zones */
@@ -1349,7 +1349,7 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
if (!zlc)
return;
- i = z - zonelist->zones;
+ i = z - zonelist->_zonerefs;
set_bit(i, zlc->fullzones);
}
@@ -1361,13 +1361,13 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
return NULL;
}
-static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
nodemask_t *allowednodes)
{
return 1;
}
-static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif /* CONFIG_NUMA */
@@ -1377,42 +1377,31 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
* a page.
*/
static struct page *
-get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, int alloc_flags)
+get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
+ struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
{
- struct zone **z;
+ struct zoneref *z;
struct page *page = NULL;
- int classzone_idx = zone_idx(zonelist->zones[0]);
- struct zone *zone;
+ int classzone_idx;
+ struct zone *zone, *preferred_zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
- enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
+
+ (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
+ &preferred_zone);
+ classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
* See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
- z = zonelist->zones;
-
- do {
- /*
- * In NUMA, this could be a policy zonelist which contains
- * zones that may not be allowed by the current gfp_mask.
- * Check the zone is allowed by the current flags
- */
- if (unlikely(alloc_should_filter_zonelist(zonelist))) {
- if (highest_zoneidx == -1)
- highest_zoneidx = gfp_zone(gfp_mask);
- if (zone_idx(*z) > highest_zoneidx)
- continue;
- }
-
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ high_zoneidx, nodemask) {
if (NUMA_BUILD && zlc_active &&
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
- zone = *z;
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
goto try_next_zone;
@@ -1433,7 +1422,7 @@ zonelist_scan:
}
}
- page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
+ page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
if (page)
break;
this_zone_full:
@@ -1446,7 +1435,7 @@ try_next_zone:
zlc_active = 1;
did_zlc_setup = 1;
}
- } while (*(++z) != NULL);
+ }
if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
/* Disable zlc cache for second zonelist scan */
@@ -1459,12 +1448,14 @@ try_next_zone:
/*
* This is the 'heart' of the zoned buddy allocator.
*/
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist)
+static struct page *
+__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
- struct zone **z;
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zoneref *z;
+ struct zone *zone;
struct page *page;
struct reclaim_state reclaim_state;
struct task_struct *p = current;
@@ -1478,9 +1469,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
return NULL;
restart:
- z = zonelist->zones; /* the list of zones suitable for gfp_mask */
+ z = zonelist->_zonerefs; /* the list of zones suitable for gfp_mask */
- if (unlikely(*z == NULL)) {
+ if (unlikely(!z->zone)) {
/*
* Happens if we have an empty zonelist as a result of
* GFP_THISNODE being used on a memoryless node
@@ -1488,8 +1479,8 @@ restart:
return NULL;
}
- page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
- zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+ zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
if (page)
goto got_pg;
@@ -1504,8 +1495,8 @@ restart:
if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
- for (z = zonelist->zones; *z; z++)
- wakeup_kswapd(*z, order);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+ wakeup_kswapd(zone, order);
/*
* OK, we're below the kswapd watermark and have kicked background
@@ -1533,7 +1524,8 @@ restart:
* Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
* See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
- page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
+ page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+ high_zoneidx, alloc_flags);
if (page)
goto got_pg;
@@ -1545,8 +1537,8 @@ rebalance:
if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
/* go through the zonelist yet again, ignoring mins */
- page = get_page_from_freelist(gfp_mask, order,
- zonelist, ALLOC_NO_WATERMARKS);
+ page = get_page_from_freelist(gfp_mask, nodemask, order,
+ zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
if (page)
goto got_pg;
if (gfp_mask & __GFP_NOFAIL) {
@@ -1569,7 +1561,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
@@ -1580,12 +1572,12 @@ nofail_alloc:
drain_all_pages();
if (likely(did_some_progress)) {
- page = get_page_from_freelist(gfp_mask, order,
- zonelist, alloc_flags);
+ page = get_page_from_freelist(gfp_mask, nodemask, order,
+ zonelist, high_zoneidx, alloc_flags);
if (page)
goto got_pg;
} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
- if (!try_set_zone_oom(zonelist)) {
+ if (!try_set_zone_oom(zonelist, gfp_mask)) {
schedule_timeout_uninterruptible(1);
goto restart;
}
@@ -1596,21 +1588,22 @@ nofail_alloc:
* a parallel oom killing, we must fail if we're still
* under heavy pressure.
*/
- page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
- zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+ order, zonelist, high_zoneidx,
+ ALLOC_WMARK_HIGH|ALLOC_CPUSET);
if (page) {
- clear_zonelist_oom(zonelist);
+ clear_zonelist_oom(zonelist, gfp_mask);
goto got_pg;
}
/* The OOM killer will not help higher order allocs so fail */
if (order > PAGE_ALLOC_COSTLY_ORDER) {
- clear_zonelist_oom(zonelist);
+ clear_zonelist_oom(zonelist, gfp_mask);
goto nopage;
}
out_of_memory(zonelist, gfp_mask, order);
- clear_zonelist_oom(zonelist);
+ clear_zonelist_oom(zonelist, gfp_mask);
goto restart;
}
@@ -1646,6 +1639,20 @@ got_pg:
return page;
}
+struct page *
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist)
+{
+ return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+}
+
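+/* As __alloc_pages(), but also filters allocation zones through @nodemask */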
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask)
+{
+ return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
+}
+
EXPORT_SYMBOL(__alloc_pages);
/*
@@ -1712,15 +1719,15 @@ EXPORT_SYMBOL(free_pages);
static unsigned int nr_free_zone_pages(int offset)
{
+ struct zoneref *z;
+ struct zone *zone;
+
/* Just pick one node, since fallback list is circular */
- pg_data_t *pgdat = NODE_DATA(numa_node_id());
unsigned int sum = 0;
- struct zonelist *zonelist = pgdat->node_zonelists + offset;
- struct zone **zonep = zonelist->zones;
- struct zone *zone;
+ struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
- for (zone = *zonep++; zone; zone = *zonep++) {
+ for_each_zone_zonelist(zone, z, zonelist, offset) {
unsigned long size = zone->present_pages;
unsigned long high = zone->pages_high;
if (size > high)
@@ -1889,6 +1896,12 @@ void show_free_areas(void)
show_swap_cache_info();
}
+static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
+{
+ zoneref->zone = zone;
+ zoneref->zone_idx = zone_idx(zone);
+}
+
/*
* Builds allocation fallback zone lists.
*
@@ -1906,7 +1919,8 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
zone_type--;
zone = pgdat->node_zones + zone_type;
if (populated_zone(zone)) {
- zonelist->zones[nr_zones++] = zone;
+ zoneref_set_zone(zone,
+ &zonelist->_zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
@@ -2078,17 +2092,16 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
*/
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
- enum zone_type i;
int j;
struct zonelist *zonelist;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- zonelist = pgdat->node_zonelists + i;
- for (j = 0; zonelist->zones[j] != NULL; j++)
- ;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
- zonelist->zones[j] = NULL;
- }
+ zonelist = &pgdat->node_zonelists[0];
+ for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
+ ;
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+ MAX_NR_ZONES - 1);
+ zonelist->_zonerefs[j].zone = NULL;
+ zonelist->_zonerefs[j].zone_idx = 0;
}
/*
@@ -2096,15 +2109,13 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
*/
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
- enum zone_type i;
int j;
struct zonelist *zonelist;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
- j = build_zonelists_node(pgdat, zonelist, 0, i);
- zonelist->zones[j] = NULL;
- }
+ zonelist = &pgdat->node_zonelists[1];
+ j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
+ zonelist->_zonerefs[j].zone = NULL;
+ zonelist->_zonerefs[j].zone_idx = 0;
}
/*
@@ -2117,27 +2128,26 @@ static int node_order[MAX_NUMNODES];
static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
- enum zone_type i;
int pos, j, node;
int zone_type; /* needs to be signed */
struct zone *z;
struct zonelist *zonelist;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- zonelist = pgdat->node_zonelists + i;
- pos = 0;
- for (zone_type = i; zone_type >= 0; zone_type--) {
- for (j = 0; j < nr_nodes; j++) {
- node = node_order[j];
- z = &NODE_DATA(node)->node_zones[zone_type];
- if (populated_zone(z)) {
- zonelist->zones[pos++] = z;
- check_highest_zone(zone_type);
- }
+ zonelist = &pgdat->node_zonelists[0];
+ pos = 0;
+ for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
+ for (j = 0; j < nr_nodes; j++) {
+ node = node_order[j];
+ z = &NODE_DATA(node)->node_zones[zone_type];
+ if (populated_zone(z)) {
+ zoneref_set_zone(z,
+ &zonelist->_zonerefs[pos++]);
+ check_highest_zone(zone_type);
}
}
- zonelist->zones[pos] = NULL;
}
+ zonelist->_zonerefs[pos].zone = NULL;
+ zonelist->_zonerefs[pos].zone_idx = 0;
}
static int default_zonelist_order(void)
@@ -2214,7 +2224,8 @@ static void build_zonelists(pg_data_t *pgdat)
/* initialize zonelists */
for (i = 0; i < MAX_ZONELISTS; i++) {
zonelist = pgdat->node_zonelists + i;
- zonelist->zones[0] = NULL;
+ zonelist->_zonerefs[0].zone = NULL;
+ zonelist->_zonerefs[0].zone_idx = 0;
}
/* NUMA-aware ordering of nodes */
@@ -2264,19 +2275,15 @@ static void build_zonelists(pg_data_t *pgdat)
/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
- int i;
-
- for (i = 0; i < MAX_NR_ZONES; i++) {
- struct zonelist *zonelist;
- struct zonelist_cache *zlc;
- struct zone **z;
+ struct zonelist *zonelist;
+ struct zonelist_cache *zlc;
+ struct zoneref *z;
- zonelist = pgdat->node_zonelists + i;
- zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
- bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
- for (z = zonelist->zones; *z; z++)
- zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
- }
+ zonelist = &pgdat->node_zonelists[0];
+ zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+ bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+ for (z = zonelist->_zonerefs; z->zone; z++)
+ zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}
@@ -2290,45 +2297,44 @@ static void set_zonelist_order(void)
static void build_zonelists(pg_data_t *pgdat)
{
int node, local_node;
- enum zone_type i,j;
+ enum zone_type j;
+ struct zonelist *zonelist;
local_node = pgdat->node_id;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- struct zonelist *zonelist;
- zonelist = pgdat->node_zonelists + i;
+ zonelist = &pgdat->node_zonelists[0];
+ j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
- j = build_zonelists_node(pgdat, zonelist, 0, i);
- /*
- * Now we build the zonelist so that it contains the zones
- * of all the other nodes.
- * We don't want to pressure a particular node, so when
- * building the zones for node N, we make sure that the
- * zones coming right after the local ones are those from
- * node N+1 (modulo N)
- */
- for (node = local_node + 1; node < MAX_NUMNODES; node++) {
- if (!node_online(node))
- continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
- }
- for (node = 0; node < local_node; node++) {
- if (!node_online(node))
- continue;
- j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
- }
-
- zonelist->zones[j] = NULL;
+ /*
+ * Now we build the zonelist so that it contains the zones
+ * of all the other nodes.
+ * We don't want to pressure a particular node, so when
+ * building the zones for node N, we make sure that the
+ * zones coming right after the local ones are those from
+ * node N+1 (modulo N)
+ */
+ for (node = local_node + 1; node < MAX_NUMNODES; node++) {
+ if (!node_online(node))
+ continue;
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+ MAX_NR_ZONES - 1);
+ }
+ for (node = 0; node < local_node; node++) {
+ if (!node_online(node))
+ continue;
+ j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+ MAX_NR_ZONES - 1);
}
+
+ zonelist->_zonerefs[j].zone = NULL;
+ zonelist->_zonerefs[j].zone_idx = 0;
}
/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
- int i;
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- pgdat->node_zonelists[i].zlcache_ptr = NULL;
+ pgdat->node_zonelists[0].zlcache_ptr = NULL;
+ pgdat->node_zonelists[1].zlcache_ptr = NULL;
}
#endif /* CONFIG_NUMA */
@@ -4339,9 +4345,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else if (hashdist)
table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
else {
- unsigned long order;
- for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
- ;
+ unsigned long order = get_order(size);
table = (void*) __get_free_pages(GFP_ATOMIC, order);
/*
* If bucketsize is not a power-of-two, we may free
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 1cf1417ef8b7..0afd2387e507 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -9,11 +9,15 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
pte = pte_offset_map(pmd, addr);
- do {
+ for (;;) {
err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
if (err)
break;
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ addr += PAGE_SIZE;
+ if (addr == end)
+ break;
+ pte++;
+ }
pte_unmap(pte);
return err;
diff --git a/mm/rmap.c b/mm/rmap.c
index 997f06907b6d..bf0a5b7cfb8e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -413,9 +413,6 @@ int page_referenced(struct page *page, int is_locked,
{
int referenced = 0;
- if (page_test_and_clear_young(page))
- referenced++;
-
if (TestClearPageReferenced(page))
referenced++;
@@ -433,6 +430,10 @@ int page_referenced(struct page *page, int is_locked,
unlock_page(page);
}
}
+
+ if (page_test_and_clear_young(page))
+ referenced++;
+
return referenced;
}
@@ -661,7 +662,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
if (vma->vm_ops) {
- print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
print_symbol (KERN_EMERG " vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
}
if (vma->vm_file && vma->vm_file->f_op)
diff --git a/mm/shmem.c b/mm/shmem.c
index f514dd392cd9..e6d9298aa22a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1079,104 +1079,47 @@ redirty:
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
-static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
- char *nodelist = strchr(value, ':');
- int err = 1;
+ char buffer[64];
- if (nodelist) {
- /* NUL-terminate policy string */
- *nodelist++ = '\0';
- if (nodelist_parse(nodelist, *policy_nodes))
- goto out;
- if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
- goto out;
- }
- if (!strcmp(value, "default")) {
- *policy = MPOL_DEFAULT;
- /* Don't allow a nodelist */
- if (!nodelist)
- err = 0;
- } else if (!strcmp(value, "prefer")) {
- *policy = MPOL_PREFERRED;
- /* Insist on a nodelist of one node only */
- if (nodelist) {
- char *rest = nodelist;
- while (isdigit(*rest))
- rest++;
- if (!*rest)
- err = 0;
- }
- } else if (!strcmp(value, "bind")) {
- *policy = MPOL_BIND;
- /* Insist on a nodelist */
- if (nodelist)
- err = 0;
- } else if (!strcmp(value, "interleave")) {
- *policy = MPOL_INTERLEAVE;
- /*
- * Default to online nodes with memory if no nodelist
- */
- if (!nodelist)
- *policy_nodes = node_states[N_HIGH_MEMORY];
- err = 0;
- }
-out:
- /* Restore string for error message */
- if (nodelist)
- *--nodelist = ':';
- return err;
-}
-
-static void shmem_show_mpol(struct seq_file *seq, int policy,
- const nodemask_t policy_nodes)
-{
- char *policy_string;
+ if (!mpol || mpol->mode == MPOL_DEFAULT)
+ return; /* show nothing */
- switch (policy) {
- case MPOL_PREFERRED:
- policy_string = "prefer";
- break;
- case MPOL_BIND:
- policy_string = "bind";
- break;
- case MPOL_INTERLEAVE:
- policy_string = "interleave";
- break;
- default:
- /* MPOL_DEFAULT */
- return;
- }
+ mpol_to_str(buffer, sizeof(buffer), mpol, 1);
- seq_printf(seq, ",mpol=%s", policy_string);
-
- if (policy != MPOL_INTERLEAVE ||
- !nodes_equal(policy_nodes, node_states[N_HIGH_MEMORY])) {
- char buffer[64];
- int len;
+ seq_printf(seq, ",mpol=%s", buffer);
+}
- len = nodelist_scnprintf(buffer, sizeof(buffer), policy_nodes);
- if (len < sizeof(buffer))
- seq_printf(seq, ":%s", buffer);
- else
- seq_printf(seq, ":?");
+static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+ struct mempolicy *mpol = NULL;
+ if (sbinfo->mpol) {
+ spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
+ mpol = sbinfo->mpol;
+ mpol_get(mpol);
+ spin_unlock(&sbinfo->stat_lock);
}
+ return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
struct shmem_inode_info *info, unsigned long idx)
{
+ struct mempolicy mpol, *spol;
struct vm_area_struct pvma;
struct page *page;
+ spol = mpol_cond_copy(&mpol,
+ mpol_shared_policy_lookup(&info->policy, idx));
+
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ pvma.vm_policy = spol;
page = swapin_readahead(entry, gfp, &pvma, 0);
- mpol_free(pvma.vm_policy);
return page;
}
@@ -1184,27 +1127,21 @@ static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
- struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
- page = alloc_page_vma(gfp, &pvma, 0);
- mpol_free(pvma.vm_policy);
- return page;
+
+ /*
+ * alloc_page_vma() will drop the shared policy reference
+ */
+ return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
-static inline int shmem_parse_mpol(char *value, int *policy,
- nodemask_t *policy_nodes)
-{
- return 1;
-}
-
-static inline void shmem_show_mpol(struct seq_file *seq, int policy,
- const nodemask_t policy_nodes)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */
@@ -1222,6 +1159,13 @@ static inline struct page *shmem_alloc_page(gfp_t gfp,
}
#endif /* CONFIG_NUMA */
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+ return NULL;
+}
+#endif
+
/*
* shmem_getpage - either get the page from swap or allocate a new one
*
@@ -1576,8 +1520,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
case S_IFREG:
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
- mpol_shared_policy_init(&info->policy, sbinfo->policy,
- &sbinfo->policy_nodes);
+ mpol_shared_policy_init(&info->policy,
+ shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
@@ -1591,8 +1535,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
- mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
- NULL);
+ mpol_shared_policy_init(&info->policy, NULL);
break;
}
} else
@@ -2207,8 +2150,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
- if (shmem_parse_mpol(value, &sbinfo->policy,
- &sbinfo->policy_nodes))
+ if (mpol_parse_str(value, &sbinfo->mpol, 1))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
@@ -2259,8 +2201,9 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
sbinfo->free_blocks = config.max_blocks - blocks;
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
- sbinfo->policy = config.policy;
- sbinfo->policy_nodes = config.policy_nodes;
+
+ mpol_put(sbinfo->mpol);
+ sbinfo->mpol = config.mpol; /* transfers initial ref */
out:
spin_unlock(&sbinfo->stat_lock);
return error;
@@ -2281,7 +2224,7 @@ static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",uid=%u", sbinfo->uid);
if (sbinfo->gid != 0)
seq_printf(seq, ",gid=%u", sbinfo->gid);
- shmem_show_mpol(seq, sbinfo->policy, sbinfo->policy_nodes);
+ shmem_show_mpol(seq, sbinfo->mpol);
return 0;
}
#endif /* CONFIG_TMPFS */
@@ -2311,8 +2254,7 @@ static int shmem_fill_super(struct super_block *sb,
sbinfo->mode = S_IRWXUGO | S_ISVTX;
sbinfo->uid = current->fsuid;
sbinfo->gid = current->fsgid;
- sbinfo->policy = MPOL_DEFAULT;
- sbinfo->policy_nodes = node_states[N_HIGH_MEMORY];
+ sbinfo->mpol = NULL;
sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
diff --git a/mm/slab.c b/mm/slab.c
index 03927cb5ec9e..39d20f8a0791 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -139,10 +139,6 @@
#define BYTES_PER_WORD sizeof(void *)
#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
#ifndef ARCH_KMALLOC_MINALIGN
/*
* Enforce a minimum alignment for the kmalloc caches.
@@ -3242,15 +3238,16 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
struct zonelist *zonelist;
gfp_t local_flags;
- struct zone **z;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(flags);
void *obj = NULL;
int nid;
if (flags & __GFP_THISNODE)
return NULL;
- zonelist = &NODE_DATA(slab_node(current->mempolicy))
- ->node_zonelists[gfp_zone(flags)];
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
retry:
@@ -3258,10 +3255,10 @@ retry:
* Look through allowed nodes for objects available
* from existing per node queues.
*/
- for (z = zonelist->zones; *z && !obj; z++) {
- nid = zone_to_nid(*z);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ nid = zone_to_nid(zone);
- if (cpuset_zone_allowed_hardwall(*z, flags) &&
+ if (cpuset_zone_allowed_hardwall(zone, flags) &&
cache->nodelists[nid] &&
cache->nodelists[nid]->free_objects)
obj = ____cache_alloc_node(cache,
diff --git a/mm/slub.c b/mm/slub.c
index 39592b5ce68a..38914bc64aca 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,11 +207,6 @@ static inline void ClearSlabDebug(struct page *page)
#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
static int kmem_size = sizeof(struct kmem_cache);
#ifdef CONFIG_SMP
@@ -1284,7 +1279,9 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
- struct zone **z;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(flags);
struct page *page;
/*
@@ -1309,14 +1306,13 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
- zonelist = &NODE_DATA(
- slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
- for (z = zonelist->zones; *z; z++) {
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
- n = get_node(s, zone_to_nid(*z));
+ n = get_node(s, zone_to_nid(zone));
- if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > MIN_PARTIAL) {
page = get_partial_node(n);
if (page)
diff --git a/mm/sparse.c b/mm/sparse.c
index 7e9191381f86..dff71f173ae9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -208,12 +209,12 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
}
/*
- * We need this if we ever free the mem_maps. While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
*/
-static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
+ /* mask off the extra low bits of information */
+ coded_mem_map &= SECTION_MAP_MASK;
return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
@@ -232,7 +233,7 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
return 1;
}
-static unsigned long usemap_size(void)
+unsigned long usemap_size(void)
{
unsigned long size_bytes;
size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
@@ -249,11 +250,22 @@ static unsigned long *__kmalloc_section_usemap(void)
static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
- unsigned long *usemap;
+ unsigned long *usemap, section_nr;
struct mem_section *ms = __nr_to_section(pnum);
int nid = sparse_early_nid(ms);
+ struct pglist_data *pgdat = NODE_DATA(nid);
- usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+ /*
+	 * The page holding a usemap can't be freed until every section
+	 * that uses the usemap has been freed; the same holds for the
+	 * pgdat. If section A holds the pgdat and section B holds
+	 * usemaps for other sections (including section A), neither
+	 * section can be removed, because each depends on the other.
+	 * To avoid this, collect all usemaps on the section that also
+	 * holds the pgdat.
+ */
+ section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+ usemap = alloc_bootmem_section(usemap_size(), section_nr);
if (usemap)
return usemap;
@@ -273,8 +285,8 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
if (map)
return map;
- map = alloc_bootmem_node(NODE_DATA(nid),
- sizeof(struct page) * PAGES_PER_SECTION);
+ map = alloc_bootmem_pages_node(NODE_DATA(nid),
+ PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -365,6 +377,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
return; /* XXX: Not implemented yet */
}
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
@@ -402,8 +417,69 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
free_pages((unsigned long)memmap,
get_order(sizeof(struct page) * nr_pages));
}
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+ unsigned long maps_section_nr, removing_section_nr, i;
+ int magic;
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ magic = atomic_read(&page->_mapcount);
+
+ BUG_ON(magic == NODE_INFO);
+
+ maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+ removing_section_nr = page->private;
+
+ /*
+ * When this function is called, the removing section is
+ * logical offlined state. This means all pages are isolated
+ * from page allocator. If removing section's memmap is placed
+ * on the same section, it must not be freed.
+ * If it is freed, page allocator may allocate it which will
+ * be removed physically soon.
+ */
+ if (maps_section_nr != removing_section_nr)
+ put_page_bootmem(page);
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+ struct page *usemap_page;
+ unsigned long nr_pages;
+
+ if (!usemap)
+ return;
+
+ usemap_page = virt_to_page(usemap);
+ /*
+ * Check to see if allocation came from hot-plug-add
+ */
+ if (PageSlab(usemap_page)) {
+ kfree(usemap);
+ if (memmap)
+ __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+ return;
+ }
+
+ /*
+	 * The usemap came from bootmem. It was packed at boot time with
+	 * the other usemaps on the section that holds the pgdat. Just
+	 * keep it as is for now.
+ */
+
+ if (memmap) {
+ struct page *memmap_page;
+ memmap_page = virt_to_page(memmap);
+
+ nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+ >> PAGE_SHIFT;
+
+ free_map_bootmem(memmap_page, nr_pages);
+ }
+}
+
/*
* returns the number of sections whose mem_maps were properly
* set. If this is <=0, then that means that the passed-in
@@ -456,4 +532,20 @@ out:
}
return ret;
}
+
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+ struct page *memmap = NULL;
+ unsigned long *usemap = NULL;
+
+ if (ms->section_mem_map) {
+ usemap = ms->pageblock_flags;
+ memmap = sparse_decode_mem_map(ms->section_mem_map,
+ __section_nr(ms));
+ ms->section_mem_map = 0;
+ ms->pageblock_flags = NULL;
+ }
+
+ free_section_usemap(memmap, usemap);
+}
#endif
diff --git a/mm/swap.c b/mm/swap.c
index aa1139ccf3a7..91e194445a5e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -132,34 +132,21 @@ static void pagevec_move_tail(struct pagevec *pvec)
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
* inactive list.
- *
- * Returns zero if it cleared PG_writeback.
*/
-int rotate_reclaimable_page(struct page *page)
+void rotate_reclaimable_page(struct page *page)
{
- struct pagevec *pvec;
- unsigned long flags;
-
- if (PageLocked(page))
- return 1;
- if (PageDirty(page))
- return 1;
- if (PageActive(page))
- return 1;
- if (!PageLRU(page))
- return 1;
-
- page_cache_get(page);
- local_irq_save(flags);
- pvec = &__get_cpu_var(lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
- pagevec_move_tail(pvec);
- local_irq_restore(flags);
-
- if (!test_clear_page_writeback(page))
- BUG();
+ if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+ PageLRU(page)) {
+ struct pagevec *pvec;
+ unsigned long flags;
- return 0;
+ page_cache_get(page);
+ local_irq_save(flags);
+ pvec = &__get_cpu_var(lru_rotate_pvecs);
+ if (!pagevec_add(pvec, page))
+ pagevec_move_tail(pvec);
+ local_irq_restore(flags);
+ }
}
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2da149cfc9ac..67051be7083a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1582,6 +1582,14 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = -EINVAL;
goto bad_swap;
case 2:
+		/* swap partition endianness hack... */
+ if (swab32(swap_header->info.version) == 1) {
+ swab32s(&swap_header->info.version);
+ swab32s(&swap_header->info.last_page);
+ swab32s(&swap_header->info.nr_badpages);
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ swab32s(&swap_header->info.badpages[i]);
+ }
/* Check the swap header's sub-version and the size of
the swap file and bad block lists */
if (swap_header->info.version != 1) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 7d20ce41ecf5..b8961cb63414 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -391,6 +391,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t next;
int i;
int ret = 0;
+ int ret2 = 0;
int did_range_unmap = 0;
int wrapped = 0;
@@ -438,9 +439,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
}
BUG_ON(page_mapped(page));
- ret = do_launder_page(mapping, page);
- if (ret == 0 && !invalidate_complete_page2(mapping, page))
- ret = -EIO;
+ ret2 = do_launder_page(mapping, page);
+ if (ret2 == 0) {
+ if (!invalidate_complete_page2(mapping, page))
+ ret2 = -EIO;
+ }
+ if (ret2 < 0)
+ ret = ret2;
unlock_page(page);
}
pagevec_release(&pvec);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ecf91f8034bf..e33e0ae69ad1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,8 +14,9 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-
+#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/kallsyms.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
@@ -25,7 +26,7 @@ DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
- int node);
+ int node, void *caller);
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
@@ -204,9 +205,9 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
}
EXPORT_SYMBOL(vmalloc_to_pfn);
-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end,
- int node, gfp_t gfp_mask)
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+ unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
struct vm_struct **p, *tmp, *area;
unsigned long align = 1;
@@ -269,6 +270,7 @@ found:
area->pages = NULL;
area->nr_pages = 0;
area->phys_addr = 0;
+ area->caller = caller;
write_unlock(&vmlist_lock);
return area;
@@ -284,7 +286,8 @@ out:
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -299,14 +302,22 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
- return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+ void *caller)
+{
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, caller);
}
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
int node, gfp_t gfp_mask)
{
return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
- gfp_mask);
+ gfp_mask, __builtin_return_address(0));
}
/* Caller must hold vmlist_lock */
@@ -455,9 +466,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > num_physpages)
return NULL;
- area = get_vm_area((count << PAGE_SHIFT), flags);
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
if (!area)
return NULL;
+
if (map_vm_area(area, prot, &pages)) {
vunmap(area->addr);
return NULL;
@@ -468,7 +481,7 @@ void *vmap(struct page **pages, unsigned int count,
EXPORT_SYMBOL(vmap);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node)
+ pgprot_t prot, int node, void *caller)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
@@ -480,7 +493,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
- PAGE_KERNEL, node);
+ PAGE_KERNEL, node, caller);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size,
@@ -488,6 +501,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
node);
}
area->pages = pages;
+ area->caller = caller;
if (!area->pages) {
remove_vm_area(area->addr);
kfree(area);
@@ -521,7 +535,8 @@ fail:
void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_area_node(area, gfp_mask, prot, -1);
+ return __vmalloc_area_node(area, gfp_mask, prot, -1,
+ __builtin_return_address(0));
}
/**
@@ -536,7 +551,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
* kernel virtual space, using a pagetable protection of @prot.
*/
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
- int node)
+ int node, void *caller)
{
struct vm_struct *area;
@@ -544,16 +559,19 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
if (!size || (size >> PAGE_SHIFT) > num_physpages)
return NULL;
- area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+ area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+ node, gfp_mask, caller);
+
if (!area)
return NULL;
- return __vmalloc_area_node(area, gfp_mask, prot, node);
+ return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
- return __vmalloc_node(size, gfp_mask, prot, -1);
+ return __vmalloc_node(size, gfp_mask, prot, -1,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
@@ -568,7 +586,8 @@ EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
@@ -608,7 +627,8 @@ EXPORT_SYMBOL(vmalloc_user);
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
@@ -843,7 +863,8 @@ struct vm_struct *alloc_vm_area(size_t size)
{
struct vm_struct *area;
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP,
+ __builtin_return_address(0));
if (area == NULL)
return NULL;
@@ -873,3 +894,85 @@ void free_vm_area(struct vm_struct *area)
kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
+
+
+#ifdef CONFIG_PROC_FS
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct vm_struct *v;
+
+ read_lock(&vmlist_lock);
+ v = vmlist;
+ while (n > 0 && v) {
+ n--;
+ v = v->next;
+ }
+ if (!n)
+ return v;
+
+ return NULL;
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct vm_struct *v = p;
+
+ ++*pos;
+ return v->next;
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ read_unlock(&vmlist_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ struct vm_struct *v = p;
+
+ seq_printf(m, "0x%p-0x%p %7ld",
+ v->addr, v->addr + v->size, v->size);
+
+ if (v->caller) {
+ char buff[2 * KSYM_NAME_LEN];
+
+ seq_putc(m, ' ');
+ sprint_symbol(buff, (unsigned long)v->caller);
+ seq_puts(m, buff);
+ }
+
+ if (v->nr_pages)
+ seq_printf(m, " pages=%d", v->nr_pages);
+
+ if (v->phys_addr)
+ seq_printf(m, " phys=%lx", v->phys_addr);
+
+ if (v->flags & VM_IOREMAP)
+ seq_printf(m, " ioremap");
+
+ if (v->flags & VM_ALLOC)
+ seq_printf(m, " vmalloc");
+
+ if (v->flags & VM_MAP)
+ seq_printf(m, " vmap");
+
+ if (v->flags & VM_USERMAP)
+ seq_printf(m, " user");
+
+ if (v->flags & VM_VPAGES)
+ seq_printf(m, " vpages");
+
+ seq_putc(m, '\n');
+ return 0;
+}
+
+const struct seq_operations vmalloc_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+#endif
+
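The s_start/s_next/s_stop/s_show quartet registered above is the standard seq_file iterator contract: start acquires the lock and positions the cursor, next advances it, stop releases the lock, and show formats a single record. The vmalloc_op table itself is wired to a /proc file elsewhere; as a self-contained illustration of that wiring, here is a sketch of a minimal module exposing its own table the same way (the name demo_seq and everything in it are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static const char *items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(items))
		return NULL;
	return (void *)&items[*pos];
}

static void *demo_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	if (*pos >= ARRAY_SIZE(items))
		return NULL;
	return (void *)&items[*pos];
}

static void demo_stop(struct seq_file *m, void *p)
{
	/* nothing to unlock in this toy example */
}

static int demo_show(struct seq_file *m, void *p)
{
	seq_printf(m, "%s\n", *(const char **)p);
	return 0;
}

static const struct seq_operations demo_op = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_op);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init demo_init(void)
{
	proc_create("demo_seq", 0444, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_seq", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");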
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f80a5b7c057f..eceac9f9032f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1246,17 +1246,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
+ enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
unsigned long nr_reclaimed = 0;
- int i;
-
+ struct zoneref *z;
+ struct zone *zone;
sc->all_unreclaimable = 1;
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
-
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!populated_zone(zone))
continue;
/*
@@ -1301,8 +1300,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ struct scan_control *sc)
{
int priority;
int ret = 0;
@@ -1310,7 +1309,9 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
- int i;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
if (scan_global_lru(sc))
count_vm_event(ALLOCSTALL);
@@ -1318,8 +1319,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
* mem_cgroup will not do shrink_slab.
*/
if (scan_global_lru(sc)) {
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
@@ -1333,13 +1333,13 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
- nr_reclaimed += shrink_zones(priority, zones, sc);
+ nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
*/
if (scan_global_lru(sc)) {
- shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+ shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
if (reclaim_state) {
nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
@@ -1383,8 +1383,7 @@ out:
priority = 0;
if (scan_global_lru(sc)) {
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
@@ -1397,7 +1396,8 @@ out:
return ret;
}
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1410,7 +1410,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
.isolate_pages = isolate_pages_global,
};
- return do_try_to_free_pages(zones, gfp_mask, &sc);
+ return do_try_to_free_pages(zonelist, &sc);
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1419,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
gfp_t gfp_mask)
{
struct scan_control sc = {
- .gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
.may_swap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1428,13 +1427,12 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
};
- struct zone **zones;
- int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
+ struct zonelist *zonelist;
- zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
- if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
- return 1;
- return 0;
+ sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+ (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+ zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+ return do_try_to_free_pages(zonelist, &sc);
}
#endif
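The conversions above all follow one shape: a NULL-terminated struct zone ** walk becomes for_each_zone_zonelist(), which additionally skips zones above the high_zoneidx derived from the allocation's gfp_mask. A hedged user-space imitation of that iterator shape (the macro below is a stand-in, not the kernel's definition):

#include <stdio.h>

struct zone { int idx; const char *name; };

/* Illustrative stand-in for for_each_zone_zonelist(): walk a
 * NULL-terminated array, skipping zones above a cutoff index. */
#define for_each_zone_below(zone, zp, list, high_idx)		\
	for ((zp) = (list); ((zone) = *(zp)) != NULL; (zp)++)	\
		if ((zone)->idx > (high_idx))			\
			continue;				\
		else

int main(void)
{
	struct zone dma = { 0, "DMA" }, normal = { 1, "Normal" };
	struct zone highmem = { 2, "HighMem" };
	struct zone *zonelist[] = { &dma, &normal, &highmem, NULL };
	struct zone *zone, **zp;

	/* high_zoneidx == 1: HighMem is filtered out, as gfp_zone() would. */
	for_each_zone_below(zone, zp, zonelist, 1)
		printf("shrinking %s\n", zone->name);
	return 0;
}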
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7c7286e9506d..ec6035eda933 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -322,6 +322,7 @@ void refresh_cpu_vm_stats(int cpu)
p->expire = 3;
#endif
}
+ cond_resched();
#ifdef CONFIG_NUMA
/*
* Deal with draining the remote pageset of this
@@ -364,13 +365,13 @@ void refresh_cpu_vm_stats(int cpu)
*
* Must be called with interrupts disabled.
*/
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
- if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+ if (z->zone_pgdat == preferred_zone->zone_pgdat) {
__inc_zone_state(z, NUMA_HIT);
} else {
__inc_zone_state(z, NUMA_MISS);
- __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+ __inc_zone_state(preferred_zone, NUMA_FOREIGN);
}
if (z->node == numa_node_id())
__inc_zone_state(z, NUMA_LOCAL);
@@ -645,6 +646,10 @@ static const char * const vmstat_text[] = {
"allocstall",
"pgrotated",
+#ifdef CONFIG_HUGETLB_PAGE
+ "htlb_buddy_alloc_success",
+ "htlb_buddy_alloc_fail",
+#endif
#endif
};
diff --git a/net/can/raw.c b/net/can/raw.c
index 201cbfc6b9ec..69877b8e7e9c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -435,15 +435,13 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (!filter)
return -ENOMEM;
- err = copy_from_user(filter, optval, optlen);
- if (err) {
+ if (copy_from_user(filter, optval, optlen)) {
kfree(filter);
- return err;
+ return -EFAULT;
}
} else if (count == 1) {
- err = copy_from_user(&sfilter, optval, optlen);
- if (err)
- return err;
+ if (copy_from_user(&sfilter, optval, optlen))
+ return -EFAULT;
}
lock_sock(sk);
@@ -493,9 +491,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (optlen != sizeof(err_mask))
return -EINVAL;
- err = copy_from_user(&err_mask, optval, optlen);
- if (err)
- return err;
+ if (copy_from_user(&err_mask, optval, optlen))
+ return -EFAULT;
err_mask &= CAN_ERR_MASK;
@@ -531,7 +528,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (optlen != sizeof(ro->loopback))
return -EINVAL;
- err = copy_from_user(&ro->loopback, optval, optlen);
+ if (copy_from_user(&ro->loopback, optval, optlen))
+ return -EFAULT;
break;
@@ -539,7 +537,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (optlen != sizeof(ro->recv_own_msgs))
return -EINVAL;
- err = copy_from_user(&ro->recv_own_msgs, optval, optlen);
+ if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
+ return -EFAULT;
break;
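All four hunks fix the same misuse: copy_from_user() returns the number of bytes it could not copy, not an errno, so the old "return err" could hand a positive byte count back to the caller as if it were a status code. A small user-space model of why the idiom must be "if (copy_from_user(...)) return -EFAULT;":

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Model of copy_from_user(): returns bytes NOT copied (0 on success). */
static unsigned long fake_copy_from_user(void *to, const void *from,
					 unsigned long n, unsigned long fault_at)
{
	unsigned long ok = (fault_at < n) ? fault_at : n;

	memcpy(to, from, ok);
	return n - ok;		/* leftover count, like the real helper */
}

int main(void)
{
	char src[8] = "filter", dst[8];
	long err;

	/* Old pattern: returns 4, a positive count, not an errno. */
	err = fake_copy_from_user(dst, src, 8, 4);
	printf("old style would return %ld\n", err);

	/* Fixed pattern: any leftover byte means -EFAULT. */
	if (fake_copy_from_user(dst, src, 8, 4))
		err = -EFAULT;
	printf("fixed style returns %ld\n", err);
	return 0;
}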
diff --git a/net/compat.c b/net/compat.c
index 80013fb69a61..01bf95d0832e 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -24,6 +24,8 @@
#include <net/scm.h>
#include <net/sock.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
#include <asm/uaccess.h>
#include <net/compat.h>
@@ -521,6 +523,121 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
}
return err;
}
+
+struct compat_group_req {
+ __u32 gr_interface;
+ struct __kernel_sockaddr_storage gr_group
+ __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+struct compat_group_source_req {
+ __u32 gsr_interface;
+ struct __kernel_sockaddr_storage gsr_group
+ __attribute__ ((aligned(4)));
+ struct __kernel_sockaddr_storage gsr_source
+ __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+struct compat_group_filter {
+ __u32 gf_interface;
+ struct __kernel_sockaddr_storage gf_group
+ __attribute__ ((aligned(4)));
+ __u32 gf_fmode;
+ __u32 gf_numsrc;
+ struct __kernel_sockaddr_storage gf_slist[1]
+ __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+
+int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+ char __user *optval, int optlen,
+ int (*setsockopt)(struct sock *,int,int,char __user *,int))
+{
+ char __user *koptval = optval;
+ int koptlen = optlen;
+
+ switch (optname) {
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ {
+ struct compat_group_req __user *gr32 = (void *)optval;
+ struct group_req __user *kgr =
+ compat_alloc_user_space(sizeof(struct group_req));
+ u32 interface;
+
+ if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) ||
+ !access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) ||
+ __get_user(interface, &gr32->gr_interface) ||
+ __put_user(interface, &kgr->gr_interface) ||
+ copy_in_user(&kgr->gr_group, &gr32->gr_group,
+ sizeof(kgr->gr_group)))
+ return -EFAULT;
+ koptval = (char __user *)kgr;
+ koptlen = sizeof(struct group_req);
+ break;
+ }
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ {
+ struct compat_group_source_req __user *gsr32 = (void *)optval;
+ struct group_source_req *kgsr = compat_alloc_user_space(
+ sizeof(struct group_source_req));
+ u32 interface;
+
+ if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) ||
+ !access_ok(VERIFY_WRITE, kgsr,
+ sizeof(struct group_source_req)) ||
+ __get_user(interface, &gsr32->gsr_interface) ||
+ __put_user(interface, &kgsr->gsr_interface) ||
+ copy_in_user(&kgsr->gsr_group, &gsr32->gsr_group,
+ sizeof(kgsr->gsr_group)) ||
+ copy_in_user(&kgsr->gsr_source, &gsr32->gsr_source,
+ sizeof(kgsr->gsr_source)))
+ return -EFAULT;
+ koptval = (char __user *)kgsr;
+ koptlen = sizeof(struct group_source_req);
+ break;
+ }
+ case MCAST_MSFILTER:
+ {
+ struct compat_group_filter __user *gf32 = (void *)optval;
+ struct group_filter *kgf;
+ u32 interface, fmode, numsrc;
+
+ if (!access_ok(VERIFY_READ, gf32, sizeof(*gf32)) ||
+ __get_user(interface, &gf32->gf_interface) ||
+ __get_user(fmode, &gf32->gf_fmode) ||
+ __get_user(numsrc, &gf32->gf_numsrc))
+ return -EFAULT;
+ koptlen = optlen + sizeof(struct group_filter) -
+ sizeof(struct compat_group_filter);
+ if (koptlen < GROUP_FILTER_SIZE(numsrc))
+ return -EINVAL;
+ kgf = compat_alloc_user_space(koptlen);
+ if (!access_ok(VERIFY_WRITE, kgf, koptlen) ||
+ __put_user(interface, &kgf->gf_interface) ||
+ __put_user(fmode, &kgf->gf_fmode) ||
+ __put_user(numsrc, &kgf->gf_numsrc) ||
+ copy_in_user(&kgf->gf_group, &gf32->gf_group,
+ sizeof(kgf->gf_group)) ||
+ (numsrc && copy_in_user(&kgf->gf_slist, &gf32->gf_slist,
+ numsrc * sizeof(kgf->gf_slist[0]))))
+ return -EFAULT;
+ koptval = (char __user *)kgf;
+ break;
+ }
+
+ default:
+ break;
+ }
+ return setsockopt(sock, level, optname, koptval, koptlen);
+}
+
+EXPORT_SYMBOL(compat_mc_setsockopt);
+
+
/* Argument list sizes for compat_sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
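The packed and aligned(4) attributes are the whole point of these compat_* mirrors: on 64-bit kernels struct __kernel_sockaddr_storage is 8-byte aligned, so the native structures carry padding that the 32-bit ABI does not, and the handler must copy field by field into a native buffer obtained from compat_alloc_user_space(). A small demonstration of the layout difference, using simplified stand-in types (run on a 64-bit host to see the offsets diverge):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in: a 128-byte storage blob whose natural alignment
 * is 8 on 64-bit ABIs but only 4 on 32-bit ones. */
struct sockaddr_storage64 { long long align; char pad[120]; };

struct group_req_native {			/* native 64-bit layout */
	unsigned int gr_interface;
	struct sockaddr_storage64 gr_group;	/* starts at offset 8 */
};

struct group_req_compat {			/* 32-bit layout, recreated */
	unsigned int gr_interface;
	struct sockaddr_storage64 gr_group
		__attribute__ ((aligned(4)));	/* starts at offset 4 */
} __attribute__ ((packed));

int main(void)
{
	printf("native: gr_group at %zu, size %zu\n",
	       offsetof(struct group_req_native, gr_group),
	       sizeof(struct group_req_native));
	printf("compat: gr_group at %zu, size %zu\n",
	       offsetof(struct group_req_compat, gr_group),
	       sizeof(struct group_req_compat));
	return 0;
}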
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d8adfd4972e2..4d8d95404f45 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -36,6 +36,7 @@
#include <linux/mroute.h>
#include <net/route.h>
#include <net/xfrm.h>
+#include <net/compat.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/transp_v6.h>
#endif
@@ -923,6 +924,10 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
if (level != SOL_IP)
return -ENOPROTOOPT;
+ if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
+ return compat_mc_setsockopt(sk, level, optname, optval, optlen,
+ ip_setsockopt);
+
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ac9b8482f702..0298f80681f2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4925,8 +4925,7 @@ step5:
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
- if (tcp_defer_accept_check(sk))
- return -1;
+ tcp_defer_accept_check(sk);
return 0;
csum_error:
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index b2c9becc02e8..42814a2ec9d7 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -167,7 +167,7 @@ config IPV6_SIT
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
encapsulating protocol. This driver implements encapsulation of IPv6
- into IPv4 packets. This is useful if you want to connect to IPv6
+ into IPv4 packets. This is useful if you want to connect two IPv6
networks over an IPv4-only path.
Saying M here will produce a module called sit.ko. If unsure, say Y.
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c8c6e33d1163..2de3c464fe75 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -358,7 +358,7 @@ static int pim6_rcv(struct sk_buff *skb)
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
- (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))))
+ csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
/* check if the inner packet is destined to mcast group */
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 06de9d0e1f6b..db6fdc1498aa 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -52,6 +52,7 @@
#include <net/udp.h>
#include <net/udplite.h>
#include <net/xfrm.h>
+#include <net/compat.h>
#include <asm/uaccess.h>
@@ -779,6 +780,10 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (level != SOL_IPV6)
return -ENOPROTOOPT;
+ if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
+ return compat_mc_setsockopt(sk, level, optname, optval, optlen,
+ ipv6_setsockopt);
+
err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 520a5180a4f6..a24b459dd45a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -73,7 +73,9 @@ config MAC80211_MESH
config MAC80211_LEDS
bool "Enable LED triggers"
- depends on MAC80211 && LEDS_TRIGGERS
+ depends on MAC80211
+ select NEW_LEDS
+ select LEDS_TRIGGERS
---help---
This option enables a few LED triggers for different
packet receive/transmit events.
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 742003d3a841..9ee3affab346 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/jhash.h>
+#include <asm/unaligned.h>
#include "ieee80211_i.h"
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 02de8f1522a3..3df809222d1c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,7 +7,6 @@
* published by the Free Software Foundation.
*/
-#include <asm/unaligned.h>
#include "mesh.h"
#define TEST_FRAME_LEN 8192
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 75d748eee0eb..e1770f7ba0b3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -445,7 +445,7 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
/**
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
* @task: task to be put to sleep
- *
+ * @action: function pointer to be executed after wait
*/
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 6ad070d87702..ad487e8abcc2 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -70,10 +70,9 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
u32 pos, u32 mask, u32 val)
{
val = (val & mask) << pos;
- val = htonl(val);
- mask = htonl(mask << pos);
- m->hdr[w] &= ~mask;
- m->hdr[w] |= val;
+ mask = mask << pos;
+ m->hdr[w] &= ~htonl(mask);
+ m->hdr[w] |= htonl(val);
}
/*
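The rewrite is behaviour-preserving, since bitwise NOT commutes with a byte swap, but keeping the stored word in wire order and applying htonl() only at the point of use makes the byte order of every operand explicit, which is what endianness checkers such as sparse want to see. A user-space rendering of the helper:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Set a bit-field at (pos, mask) inside a network-byte-order header word. */
static void msg_set_bits(uint32_t *hdr, uint32_t pos, uint32_t mask,
			 uint32_t val)
{
	val = (val & mask) << pos;
	mask = mask << pos;
	*hdr &= ~htonl(mask);	/* clear the field, still in wire order */
	*hdr |= htonl(val);	/* splice in the new value */
}

int main(void)
{
	uint32_t w = htonl(0xAABBCCDD);

	msg_set_bits(&w, 24, 0xff, 0x11);	/* replace the top byte */
	printf("word is now 0x%08x (host order)\n", ntohl(w));
	return 0;
}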
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 8aa6440d689f..ac765dd9c7f5 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -129,8 +129,7 @@ static struct xfrm_algo_desc aead_list[] = {
static struct xfrm_algo_desc aalg_list[] = {
{
- .name = "hmac(digest_null)",
- .compat = "digest_null",
+ .name = "digest_null",
.uinfo = {
.auth = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5dcc10b93c86..fac27ce770d5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2112,7 +2112,7 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
iph6 = ipv6_hdr(skb);
audit_log_format(audit_buf,
" src=" NIP6_FMT " dst=" NIP6_FMT
- " flowlbl=0x%x%x%x",
+ " flowlbl=0x%x%02x%02x",
NIP6(iph6->saddr),
NIP6(iph6->daddr),
iph6->flow_lbl[0] & 0x0f,
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 263d04ab2d94..83cee18a02e9 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -5,7 +5,7 @@ use strict;
## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ##
## Copyright (C) 2000, 1 Tim Waugh <twaugh@redhat.com> ##
## Copyright (C) 2001 Simon Huggins ##
-## Copyright (C) 2005-2007 Randy Dunlap ##
+## Copyright (C) 2005-2008 Randy Dunlap ##
## ##
## #define enhancements by Armin Kuster <akuster@mvista.com> ##
## Copyright (c) 2000 MontaVista Software, Inc. ##
@@ -366,6 +366,7 @@ foreach my $pattern (keys %highlights) {
# dumps section contents to arrays/hashes intended for that purpose.
#
sub dump_section {
+ my $file = shift;
my $name = shift;
my $contents = join "\n", @_;
@@ -379,6 +380,10 @@ sub dump_section {
$parameterdescs{$name} = $contents;
} else {
# print STDERR "other section '$name' = '$contents'\n";
+ if (defined($sections{$name}) && ($sections{$name} ne "")) {
+ print STDERR "Error(${file}:$.): duplicate section name '$name'\n";
+ ++$errors;
+ }
$sections{$name} = $contents;
push @sectionlist, $name;
}
@@ -388,6 +393,7 @@ sub dump_section {
# dump DOC: section after checking that it should go out
#
sub dump_doc_section {
+ my $file = shift;
my $name = shift;
my $contents = join "\n", @_;
@@ -399,7 +405,7 @@ sub dump_doc_section {
( $function_only == 1 && defined($function_table{$name})) ||
( $function_only == 2 && !defined($function_table{$name})))
{
- dump_section $name, $contents;
+ dump_section($file, $name, $contents);
output_blockhead({'sectionlist' => \@sectionlist,
'sections' => \%sections,
'module' => $modulename,
@@ -1923,7 +1929,7 @@ sub process_file($) {
print STDERR "Warning(${file}:$.): contents before sections\n";
++$warnings;
}
- dump_section($section, xml_escape($contents));
+ dump_section($file, $section, xml_escape($contents));
$section = $section_default;
}
@@ -1940,10 +1946,15 @@ sub process_file($) {
} elsif (/$doc_end/) {
if ($contents ne "") {
- dump_section($section, xml_escape($contents));
+ dump_section($file, $section, xml_escape($contents));
$section = $section_default;
$contents = "";
}
+ # look for doc_com + <text> + doc_end:
+ if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
+ print STDERR "Warning(${file}:$.): suspicious ending line: $_";
+ ++$warnings;
+ }
$prototype = "";
$state = 3;
@@ -1954,7 +1965,7 @@ sub process_file($) {
# @parameter line to signify start of description
if ($1 eq "" &&
($section =~ m/^@/ || $section eq $section_context)) {
- dump_section($section, xml_escape($contents));
+ dump_section($file, $section, xml_escape($contents));
$section = $section_default;
$contents = "";
} else {
@@ -1974,7 +1985,7 @@ sub process_file($) {
} elsif ($state == 4) {
# Documentation block
if (/$doc_block/) {
- dump_doc_section($section, xml_escape($contents));
+ dump_doc_section($file, $section, xml_escape($contents));
$contents = "";
$function = "";
%constants = ();
@@ -1992,7 +2003,7 @@ sub process_file($) {
}
elsif (/$doc_end/)
{
- dump_doc_section($section, xml_escape($contents));
+ dump_doc_section($file, $section, xml_escape($contents));
$contents = "";
$function = "";
%constants = ();
diff --git a/security/capability.c b/security/capability.c
index 2c6e06d18fab..38ac54e3aed1 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -44,6 +44,7 @@ static struct security_operations capability_ops = {
.task_setioprio = cap_task_setioprio,
.task_setnice = cap_task_setnice,
.task_post_setuid = cap_task_post_setuid,
+ .task_prctl = cap_task_prctl,
.task_reparent_to_init = cap_task_reparent_to_init,
.syslog = cap_syslog,
diff --git a/security/commoncap.c b/security/commoncap.c
index 852905789caf..e8c3f5e46705 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -24,11 +24,8 @@
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
-
-/* Global security state */
-
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-EXPORT_SYMBOL(securebits);
+#include <linux/prctl.h>
+#include <linux/securebits.h>
int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
{
@@ -368,7 +365,7 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
/* AUD: Audit candidate if current->cap_effective is set */
- current->keep_capabilities = 0;
+ current->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
}
int cap_bprm_secureexec (struct linux_binprm *bprm)
@@ -448,7 +445,7 @@ static inline void cap_emulate_setxuid (int old_ruid, int old_euid,
{
if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) &&
(current->uid != 0 && current->euid != 0 && current->suid != 0) &&
- !current->keep_capabilities) {
+ !issecure(SECURE_KEEP_CAPS)) {
cap_clear (current->cap_permitted);
cap_clear (current->cap_effective);
}
@@ -547,7 +544,7 @@ int cap_task_setnice (struct task_struct *p, int nice)
* this task could get inconsistent info. There can be no
* racing writer bc a task can only change its own caps.
*/
-long cap_prctl_drop(unsigned long cap)
+static long cap_prctl_drop(unsigned long cap)
{
if (!capable(CAP_SETPCAP))
return -EPERM;
@@ -556,6 +553,7 @@ long cap_prctl_drop(unsigned long cap)
cap_lower(current->cap_bset, cap);
return 0;
}
+
#else
int cap_task_setscheduler (struct task_struct *p, int policy,
struct sched_param *lp)
@@ -572,12 +570,99 @@ int cap_task_setnice (struct task_struct *p, int nice)
}
#endif
+int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5, long *rc_p)
+{
+ long error = 0;
+
+ switch (option) {
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ error = -EINVAL;
+ else
+ error = !!cap_raised(current->cap_bset, arg2);
+ break;
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+ case PR_CAPBSET_DROP:
+ error = cap_prctl_drop(arg2);
+ break;
+
+ /*
+ * The next four prctls remain to assist with transitioning a
+ * system from legacy UID=0 based privilege (when filesystem
+ * capabilities are not in use) to a system using filesystem
+ * capabilities only - as the POSIX.1e draft intended.
+ *
+ * Note:
+ *
+ * PR_SET_SECUREBITS =
+ * issecure_mask(SECURE_KEEP_CAPS_LOCKED)
+ * | issecure_mask(SECURE_NOROOT)
+ * | issecure_mask(SECURE_NOROOT_LOCKED)
+ * | issecure_mask(SECURE_NO_SETUID_FIXUP)
+ * | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
+ *
+ * will ensure that the current process and all of its
+ * children will be locked into a pure
+ * capability-based-privilege environment.
+ */
+ case PR_SET_SECUREBITS:
+ if ((((current->securebits & SECURE_ALL_LOCKS) >> 1)
+ & (current->securebits ^ arg2)) /*[1]*/
+ || ((current->securebits & SECURE_ALL_LOCKS
+ & ~arg2)) /*[2]*/
+ || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
+ || (cap_capable(current, CAP_SETPCAP) != 0)) { /*[4]*/
+ /*
+ * [1] no changing of bits that are locked
+ * [2] no unlocking of locks
+ * [3] no setting of unsupported bits
+ * [4] doing anything requires privilege (go read about
+ * the "sendmail capabilities bug")
+ */
+ error = -EPERM; /* cannot change a locked bit */
+ } else {
+ current->securebits = arg2;
+ }
+ break;
+ case PR_GET_SECUREBITS:
+ error = current->securebits;
+ break;
+
+#endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */
+
+ case PR_GET_KEEPCAPS:
+ if (issecure(SECURE_KEEP_CAPS))
+ error = 1;
+ break;
+ case PR_SET_KEEPCAPS:
+ if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
+ error = -EINVAL;
+ else if (issecure(SECURE_KEEP_CAPS_LOCKED))
+ error = -EPERM;
+ else if (arg2)
+ current->securebits |= issecure_mask(SECURE_KEEP_CAPS);
+ else
+ current->securebits &=
+ ~issecure_mask(SECURE_KEEP_CAPS);
+ break;
+
+ default:
+ /* No functionality available - continue with default */
+ return 0;
+ }
+
+ /* Functionality provided */
+ *rc_p = error;
+ return 1;
+}
+
void cap_task_reparent_to_init (struct task_struct *p)
{
cap_set_init_eff(p->cap_effective);
cap_clear(p->cap_inheritable);
cap_set_full(p->cap_permitted);
- p->keep_capabilities = 0;
+ p->securebits = SECUREBITS_DEFAULT;
return;
}
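From user space the whole mechanism is driven through prctl(2). A minimal probe, assuming libc headers new enough to carry the constants (a fallback define is included for PR_GET_SECUREBITS, which this patch series introduces with value 27):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SECUREBITS	/* introduced by this series; value assumed 27 */
#define PR_GET_SECUREBITS 27
#endif

int main(void)
{
	long sb;

	/* PR_GET_KEEPCAPS predates the patch; it now reads SECURE_KEEP_CAPS. */
	printf("keepcaps: %d\n", prctl(PR_GET_KEEPCAPS, 0, 0, 0, 0));

	/* Read the whole securebits word; needs a kernel with this patch. */
	sb = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);
	if (sb < 0)
		perror("PR_GET_SECUREBITS");
	else
		printf("securebits: 0x%lx\n", sb);
	return 0;
}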
diff --git a/security/dummy.c b/security/dummy.c
index b0232bbf427b..58d4dd1af5c7 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -604,7 +604,7 @@ static int dummy_task_kill (struct task_struct *p, struct siginfo *info,
}
static int dummy_task_prctl (int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5)
+ unsigned long arg4, unsigned long arg5, long *rc_p)
{
return 0;
}
diff --git a/security/root_plug.c b/security/root_plug.c
index 6112d1404c81..a41cf42a4fa0 100644
--- a/security/root_plug.c
+++ b/security/root_plug.c
@@ -86,6 +86,7 @@ static struct security_operations rootplug_security_ops = {
.task_post_setuid = cap_task_post_setuid,
.task_reparent_to_init = cap_task_reparent_to_init,
+ .task_prctl = cap_task_prctl,
.bprm_check_security = rootplug_bprm_check_security,
};
diff --git a/security/security.c b/security/security.c
index 8a285c7b9962..d5cb5898d967 100644
--- a/security/security.c
+++ b/security/security.c
@@ -733,9 +733,9 @@ int security_task_wait(struct task_struct *p)
}
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5)
+ unsigned long arg4, unsigned long arg5, long *rc_p)
{
- return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
+ return security_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
}
void security_task_reparent_to_init(struct task_struct *p)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 308e2cf17d75..04acb5af8317 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3303,12 +3303,13 @@ static int selinux_task_prctl(int option,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5)
+ unsigned long arg5,
+ long *rc_p)
{
/* The current prctl operations do not appear to require
any SELinux controls since they merely observe or modify
the state of the current process. */
- return 0;
+ return secondary_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
}
static int selinux_task_wait(struct task_struct *p)
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 4215971434e6..77ec16a3b68b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1242,7 +1242,7 @@ static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
int rc;
int byte;
- if (catset == 0)
+ if (!catset)
return;
sap->flags |= NETLBL_SECATTR_MLS_CAT;
@@ -2495,6 +2495,7 @@ struct security_operations smack_ops = {
.task_wait = smack_task_wait,
.task_reparent_to_init = cap_task_reparent_to_init,
.task_to_inode = smack_task_to_inode,
+ .task_prctl = cap_task_prctl,
.ipc_permission = smack_ipc_permission,
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6ba283783b70..a5da5a8cfe9b 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -317,7 +317,7 @@ static const struct file_operations smk_load_ops = {
/**
* smk_cipso_doi - initialize the CIPSO domain
*/
-void smk_cipso_doi(void)
+static void smk_cipso_doi(void)
{
int rc;
struct cipso_v4_doi *doip;
@@ -350,7 +350,7 @@ void smk_cipso_doi(void)
/**
* smk_unlbl_ambient - initialize the unlabeled domain
*/
-void smk_unlbl_ambient(char *oldambient)
+static void smk_unlbl_ambient(char *oldambient)
{
int rc;
struct netlbl_audit audit_info;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b2e12893e3f4..c82cf15730a1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -40,6 +40,7 @@
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
+#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -59,7 +60,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
static __read_mostly struct preempt_ops kvm_preempt_ops;
-static struct dentry *debugfs_dir;
+struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
@@ -119,6 +120,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+ int i, cpu;
+ cpumask_t cpus;
+ struct kvm_vcpu *vcpu;
+
+ cpus_clear(cpus);
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = kvm->vcpus[i];
+ if (!vcpu)
+ continue;
+ if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ continue;
+ cpu = vcpu->cpu;
+ if (cpu != -1 && cpu != raw_smp_processor_id())
+ cpu_set(cpu, cpus);
+ }
+ if (cpus_empty(cpus))
+ return;
+ smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
@@ -170,6 +194,7 @@ static struct kvm *kvm_create_vm(void)
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
init_rwsem(&kvm->slots_lock);
+ atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
@@ -189,9 +214,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
vfree(free->dirty_bitmap);
+ if (!dont || free->lpage_info != dont->lpage_info)
+ vfree(free->lpage_info);
+
free->npages = 0;
free->dirty_bitmap = NULL;
free->rmap = NULL;
+ free->lpage_info = NULL;
}
void kvm_free_physmem(struct kvm *kvm)
@@ -215,11 +244,25 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmdrop(mm);
}
+void kvm_get_kvm(struct kvm *kvm)
+{
+ atomic_inc(&kvm->users_count);
+}
+EXPORT_SYMBOL_GPL(kvm_get_kvm);
+
+void kvm_put_kvm(struct kvm *kvm)
+{
+ if (atomic_dec_and_test(&kvm->users_count))
+ kvm_destroy_vm(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_put_kvm);
+
+
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
struct kvm *kvm = filp->private_data;
- kvm_destroy_vm(kvm);
+ kvm_put_kvm(kvm);
return 0;
}
@@ -301,6 +344,25 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
+ if (npages && !new.lpage_info) {
+ int largepages = npages / KVM_PAGES_PER_HPAGE;
+ if (npages % KVM_PAGES_PER_HPAGE)
+ largepages++;
+ if (base_gfn % KVM_PAGES_PER_HPAGE)
+ largepages++;
+
+ new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+
+ if (!new.lpage_info)
+ goto out_free;
+
+ memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+
+ if (base_gfn % KVM_PAGES_PER_HPAGE)
+ new.lpage_info[0].write_count = 1;
+ if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
+ new.lpage_info[largepages-1].write_count = 1;
+ }
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
@@ -397,6 +459,12 @@ int is_error_page(struct page *page)
}
EXPORT_SYMBOL_GPL(is_error_page);
+int is_error_pfn(pfn_t pfn)
+{
+ return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_error_pfn);
+
static inline unsigned long bad_hva(void)
{
return PAGE_OFFSET;
@@ -444,7 +512,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
-static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
@@ -458,7 +526,7 @@ static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
/*
* Requires current->mm->mmap_sem to be held
*/
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
unsigned long addr;
@@ -469,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr)) {
get_page(bad_page);
- return bad_page;
+ return page_to_pfn(bad_page);
}
npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -477,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
if (npages != 1) {
get_page(bad_page);
- return bad_page;
+ return page_to_pfn(bad_page);
}
- return page[0];
+ return page_to_pfn(page[0]);
+}
+
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ return pfn_to_page(gfn_to_pfn(kvm, gfn));
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
- put_page(page);
+ kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+void kvm_release_pfn_clean(pfn_t pfn)
+{
+ put_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
+
void kvm_release_page_dirty(struct page *page)
{
+ kvm_release_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+
+void kvm_release_pfn_dirty(pfn_t pfn)
+{
+ kvm_set_pfn_dirty(pfn);
+ kvm_release_pfn_clean(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
+
+void kvm_set_page_dirty(struct page *page)
+{
+ kvm_set_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
+
+void kvm_set_pfn_dirty(pfn_t pfn)
+{
+ struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
- put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+void kvm_set_pfn_accessed(pfn_t pfn)
+{
+ mark_page_accessed(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+
+void kvm_get_pfn(pfn_t pfn)
+{
+ get_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
@@ -554,7 +666,9 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
+ pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+ pagefault_enable();
if (r)
return -EFAULT;
return 0;
@@ -651,6 +765,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
* We will block until either an interrupt or a signal wakes us up
*/
while (!kvm_cpu_has_interrupt(vcpu)
+ && !kvm_cpu_has_pending_timer(vcpu)
&& !signal_pending(current)
&& !kvm_arch_vcpu_runnable(vcpu)) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -678,8 +793,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->pgoff == 0)
page = virt_to_page(vcpu->run);
+#ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
+#endif
else
return VM_FAULT_SIGBUS;
get_page(page);
@@ -701,11 +818,11 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
- fput(vcpu->kvm->filp);
+ kvm_put_kvm(vcpu->kvm);
return 0;
}
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.compat_ioctl = kvm_vcpu_ioctl,
@@ -723,9 +840,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
r = anon_inode_getfd(&fd, &inode, &file,
"kvm-vcpu", &kvm_vcpu_fops, vcpu);
- if (r)
+ if (r) {
+ kvm_put_kvm(vcpu->kvm);
return r;
- atomic_inc(&vcpu->kvm->filp->f_count);
+ }
return fd;
}
@@ -760,6 +878,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
mutex_unlock(&kvm->lock);
/* Now it's all set up, let userspace reach it */
+ kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0)
goto unlink;
@@ -802,28 +921,39 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
break;
case KVM_GET_REGS: {
- struct kvm_regs kvm_regs;
+ struct kvm_regs *kvm_regs;
- memset(&kvm_regs, 0, sizeof kvm_regs);
- r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
- if (r)
+ r = -ENOMEM;
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+ if (!kvm_regs)
goto out;
+ r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
+ if (r)
+ goto out_free1;
r = -EFAULT;
- if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
- goto out;
+ if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
+ goto out_free1;
r = 0;
+out_free1:
+ kfree(kvm_regs);
break;
}
case KVM_SET_REGS: {
- struct kvm_regs kvm_regs;
+ struct kvm_regs *kvm_regs;
- r = -EFAULT;
- if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
+ r = -ENOMEM;
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+ if (!kvm_regs)
goto out;
- r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
+ r = -EFAULT;
+ if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
+ goto out_free2;
+ r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
if (r)
- goto out;
+ goto out_free2;
r = 0;
+out_free2:
+ kfree(kvm_regs);
break;
}
case KVM_GET_SREGS: {
@@ -851,6 +981,30 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_MP_STATE: {
+ struct kvm_mp_state mp_state;
+
+ r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &mp_state, sizeof mp_state))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_MP_STATE: {
+ struct kvm_mp_state mp_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&mp_state, argp, sizeof mp_state))
+ goto out;
+ r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
case KVM_TRANSLATE: {
struct kvm_translation tr;
@@ -1005,7 +1159,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.compat_ioctl = kvm_vm_ioctl,
@@ -1024,12 +1178,10 @@ static int kvm_dev_ioctl_create_vm(void)
return PTR_ERR(kvm);
r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
if (r) {
- kvm_destroy_vm(kvm);
+ kvm_put_kvm(kvm);
return r;
}
- kvm->filp = file;
-
return fd;
}
@@ -1059,7 +1211,15 @@ static long kvm_dev_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
- r = 2 * PAGE_SIZE;
+ r = PAGE_SIZE; /* struct kvm_run */
+#ifdef CONFIG_X86
+ r += PAGE_SIZE; /* pio data page */
+#endif
+ break;
+ case KVM_TRACE_ENABLE:
+ case KVM_TRACE_PAUSE:
+ case KVM_TRACE_DISABLE:
+ r = kvm_trace_ioctl(ioctl, arg);
break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
@@ -1232,9 +1392,9 @@ static void kvm_init_debug(void)
{
struct kvm_stats_debugfs_item *p;
- debugfs_dir = debugfs_create_dir("kvm", NULL);
+ kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
for (p = debugfs_entries; p->name; ++p)
- p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
+ p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
}
@@ -1245,7 +1405,7 @@ static void kvm_exit_debug(void)
for (p = debugfs_entries; p->name; ++p)
debugfs_remove(p->dentry);
- debugfs_remove(debugfs_dir);
+ debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
@@ -1272,6 +1432,7 @@ static struct sys_device kvm_sysdev = {
};
struct page *bad_page;
+pfn_t bad_pfn;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1313,6 +1474,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out;
}
+ bad_pfn = page_to_pfn(bad_page);
+
r = kvm_arch_hardware_setup();
if (r < 0)
goto out_free_0;
@@ -1386,6 +1549,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
+ kvm_trace_cleanup();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
sysdev_unregister(&kvm_sysdev);
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
new file mode 100644
index 000000000000..0e495470788d
--- /dev/null
+++ b/virt/kvm/kvm_trace.c
@@ -0,0 +1,276 @@
+/*
+ * kvm trace
+ *
+ * It is designed to allow debugging traces of kvm to be generated
+ * on UP / SMP machines. Each trace entry can be timestamped so that
+ * it's possible to reconstruct a chronological record of trace events.
+ * The implementation refers to blktrace kernel support.
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
+ *
+ * Date: Feb 2008
+ */
+
+#include <linux/module.h>
+#include <linux/relay.h>
+#include <linux/debugfs.h>
+
+#include <linux/kvm_host.h>
+
+#define KVM_TRACE_STATE_RUNNING (1 << 0)
+#define KVM_TRACE_STATE_PAUSE (1 << 1)
+#define KVM_TRACE_STATE_CLEARUP (1 << 2)
+
+struct kvm_trace {
+ int trace_state;
+ struct rchan *rchan;
+ struct dentry *lost_file;
+ atomic_t lost_records;
+};
+static struct kvm_trace *kvm_trace;
+
+struct kvm_trace_probe {
+ const char *name;
+ const char *format;
+ u32 cycle_in;
+ marker_probe_func *probe_func;
+};
+
+static inline int calc_rec_size(int cycle, int extra)
+{
+ int rec_size = KVM_TRC_HEAD_SIZE;
+
+ rec_size += extra;
+ return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
+}
+
+static void kvm_add_trace(void *probe_private, void *call_data,
+ const char *format, va_list *args)
+{
+ struct kvm_trace_probe *p = probe_private;
+ struct kvm_trace *kt = kvm_trace;
+ struct kvm_trace_rec rec;
+ struct kvm_vcpu *vcpu;
+ int i, extra, size;
+
+ if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
+ return;
+
+ rec.event = va_arg(*args, u32);
+ vcpu = va_arg(*args, struct kvm_vcpu *);
+ rec.pid = current->tgid;
+ rec.vcpu_id = vcpu->vcpu_id;
+
+ extra = va_arg(*args, u32);
+ WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
+ extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
+ rec.extra_u32 = extra;
+
+ rec.cycle_in = p->cycle_in;
+
+ if (rec.cycle_in) {
+ u64 cycle = 0;
+
+ cycle = get_cycles();
+ rec.u.cycle.cycle_lo = (u32)cycle;
+ rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
+
+ for (i = 0; i < rec.extra_u32; i++)
+ rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
+ } else {
+ for (i = 0; i < rec.extra_u32; i++)
+ rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);
+ }
+
+ size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
+ relay_write(kt->rchan, &rec, size);
+}
+
+static struct kvm_trace_probe kvm_trace_probes[] = {
+ { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
+ { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
+};
+
+static int lost_records_get(void *data, u64 *val)
+{
+ struct kvm_trace *kt = data;
+
+ *val = atomic_read(&kt->lost_records);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
+
+/*
+ * The relay channel is used in "no-overwrite" mode; it keeps track of how
+ * many times a sub-buffer filled up, so the user space app can be told how
+ * many records were lost.
+ */
+static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ struct kvm_trace *kt;
+
+ if (!relay_buf_full(buf))
+ return 1;
+
+ kt = buf->chan->private_data;
+ atomic_inc(&kt->lost_records);
+
+ return 0;
+}
+
+static struct dentry *kvm_create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int kvm_remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static struct rchan_callbacks kvm_relay_callbacks = {
+ .subbuf_start = kvm_subbuf_start_callback,
+ .create_buf_file = kvm_create_buf_file_callback,
+ .remove_buf_file = kvm_remove_buf_file_callback,
+};
+
+static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
+{
+ struct kvm_trace *kt;
+ int i, r = -ENOMEM;
+
+ if (!kuts->buf_size || !kuts->buf_nr)
+ return -EINVAL;
+
+ kt = kzalloc(sizeof(*kt), GFP_KERNEL);
+ if (!kt)
+ goto err;
+
+ r = -EIO;
+ atomic_set(&kt->lost_records, 0);
+ kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
+ kt, &kvm_trace_lost_ops);
+ if (!kt->lost_file)
+ goto err;
+
+ kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
+ kuts->buf_nr, &kvm_relay_callbacks, kt);
+ if (!kt->rchan)
+ goto err;
+
+ kvm_trace = kt;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+ struct kvm_trace_probe *p = &kvm_trace_probes[i];
+
+ r = marker_probe_register(p->name, p->format, p->probe_func, p);
+ if (r)
+ printk(KERN_INFO "Unable to register probe %s\n",
+ p->name);
+ }
+
+ kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;
+
+ return 0;
+err:
+ if (kt) {
+ if (kt->lost_file)
+ debugfs_remove(kt->lost_file);
+ if (kt->rchan)
+ relay_close(kt->rchan);
+ kfree(kt);
+ }
+ return r;
+}
+
+static int kvm_trace_enable(char __user *arg)
+{
+ struct kvm_user_trace_setup kuts;
+ int ret;
+
+ ret = copy_from_user(&kuts, arg, sizeof(kuts));
+ if (ret)
+ return -EFAULT;
+
+ ret = do_kvm_trace_enable(&kuts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int kvm_trace_pause(void)
+{
+ struct kvm_trace *kt = kvm_trace;
+ int r = -EINVAL;
+
+ if (kt == NULL)
+ return r;
+
+ if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
+ kt->trace_state = KVM_TRACE_STATE_PAUSE;
+ relay_flush(kt->rchan);
+ r = 0;
+ }
+
+ return r;
+}
+
+void kvm_trace_cleanup(void)
+{
+ struct kvm_trace *kt = kvm_trace;
+ int i;
+
+ if (kt == NULL)
+ return;
+
+ if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
+ kt->trace_state == KVM_TRACE_STATE_PAUSE) {
+
+ kt->trace_state = KVM_TRACE_STATE_CLEARUP;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+ struct kvm_trace_probe *p = &kvm_trace_probes[i];
+ marker_probe_unregister(p->name, p->probe_func, p);
+ }
+
+ relay_close(kt->rchan);
+ debugfs_remove(kt->lost_file);
+ kfree(kt);
+ }
+}
+
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ long r = -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (ioctl) {
+ case KVM_TRACE_ENABLE:
+ r = kvm_trace_enable(argp);
+ break;
+ case KVM_TRACE_PAUSE:
+ r = kvm_trace_pause();
+ break;
+ case KVM_TRACE_DISABLE:
+ r = 0;
+ kvm_trace_cleanup();
+ break;
+ }
+
+ return r;
+}
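User space arms the tracer with the new KVM_TRACE_* ioctls on /dev/kvm and then consumes the per-cpu relay files that relay_open() creates under debugfs (kvm/trace0, kvm/trace1, ..., alongside kvm/lost_records). A hedged sketch, assuming <linux/kvm.h> from a kernel carrying this patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* must provide KVM_TRACE_* for this to build */

int main(void)
{
	struct kvm_user_trace_setup kuts = {
		.buf_size = 4096,	/* relay sub-buffer size */
		.buf_nr   = 8,		/* number of sub-buffers */
	};
	int fd = open("/dev/kvm", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	if (ioctl(fd, KVM_TRACE_ENABLE, &kuts) < 0)
		perror("KVM_TRACE_ENABLE");
	/* records now stream into debugfs: kvm/trace0, kvm/trace1, ... */
	close(fd);
	return 0;
}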